Dataset schema (one row per column):

column              dtype           observed range
------              -----           --------------
repo_name           stringlengths   5-114
repo_url            stringlengths   24-133
snapshot_id         stringlengths   40-40
revision_id         stringlengths   40-40
directory_id        stringlengths   40-40
branch_name         stringclasses   209 values
visit_date          timestamp[ns]   -
revision_date       timestamp[ns]   -
committer_date      timestamp[ns]   -
github_id           int64           9.83k-683M
star_events_count   int64           0-22.6k
fork_events_count   int64           0-4.15k
gha_license_id      stringclasses   17 values
gha_created_at      timestamp[ns]   -
gha_updated_at      timestamp[ns]   -
gha_pushed_at       timestamp[ns]   -
gha_language        stringclasses   115 values
files               listlengths     1-13.2k
num_files           int64           1-13.2k
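For orientation, here is a minimal sketch of loading and inspecting rows with this schema via the Hugging Face datasets library. The dataset identifier "org/github-repos" is a hypothetical placeholder (this dump does not name its source dataset), and streaming is used so nothing has to be downloaded in full:

# Minimal sketch, assuming a Hub dataset exposing the schema above.
# "org/github-repos" is a hypothetical placeholder, not a confirmed dataset name.
from datasets import load_dataset

ds = load_dataset("org/github-repos", split="train", streaming=True)

for row in ds.take(2):
    # Scalar columns come back as plain Python values keyed by the schema names.
    print(row["repo_name"], "->", row["repo_url"])
    print("  stars:", row["star_events_count"], "forks:", row["fork_events_count"])
    print("  branch:", row["branch_name"], "files:", row["num_files"])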

repo_name: Angelica137/genPrimes
repo_url: https://github.com/Angelica137/genPrimes
snapshot_id: 1950921577b45c3c562d4e99fa922e83eb221705
revision_id: 1f8d452d07f8f68fd39e25e8e84f2c971ffda737
directory_id: 21b81d5d512e2eb09b4c936e8f59655f62d605ce
branch_name: refs/heads/master
visit_date: 2022-11-21T09:50:43.475389
revision_date: 2020-07-21T14:46:01
committer_date: 2020-07-21T14:46:01
github_id: 281422702
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_created_at: null
gha_updated_at: null
gha_pushed_at: null
gha_language: null
files:
[ { "alpha_fraction": 0.6153846383094788, "alphanum_fraction": 0.6331360936164856, "avg_line_length": 23.285715103149414, "blob_id": "9b7864b1d7e87d0a26148f5cc6a2db6f8d786183", "content_id": "a9bf023de4002089d23468ec56c5c7268c684806", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 169, "license_type": "no_license", "max_line_length": 39, "num_lines": 7, "path": "/tests/test_genPrimes.py", "repo_name": "Angelica137/genPrimes", "src_encoding": "UTF-8", "text": "from scripts.genPrimes import genPrimes\n\ndef test_next_method_genPrimes():\n g = genPrimes()\n assert g.next() == 2\n assert g.next() == 3\n assert g.next() == 5" }, { "alpha_fraction": 0.6824324131011963, "alphanum_fraction": 0.7229729890823364, "avg_line_length": 48.33333206176758, "blob_id": "ba0af31f0db0b60b232736578daea69857b083a1", "content_id": "a7267da1a23587c648cbb833ca63a5eb635dea8e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 148, "license_type": "no_license", "max_line_length": 134, "num_lines": 3, "path": "/README.md", "repo_name": "Angelica137/genPrimes", "src_encoding": "UTF-8", "text": "# GenPrimes\n\nWrite a generator, genPrimes, that returns the sequence of prime numbers on successive calls to its next() method: 2, 3, 5, 7, 11, ...\n" } ]
num_files: 2
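The files value above is a JSON array of per-file records: alongside statistics such as alpha_fraction and avg_line_length, each record embeds the complete source file in its text field. A minimal sketch of unpacking such an array, assuming files_json holds one row's files array as a raw JSON string:

import json

# files_json is assumed to hold one row's `files` array as a JSON string.
files = json.loads(files_json)
for f in files:
    # Per-file metadata recorded in the dataset.
    print(f["path"], f["language"], f["length_bytes"], "bytes,", f["num_lines"], "lines")
    source_code = f["text"]  # the file body itself is stored inline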

repo_name: reeveress/monitor-control-before-july-2017-trip
repo_url: https://github.com/reeveress/monitor-control-before-july-2017-trip
snapshot_id: a207a77cb0c12228ab201a6d3eea7420c48facc3
revision_id: 5cb2412482ed9f3b4bb6511310ccef07c2d416eb
directory_id: 2aeb24601b34d2b0b4cbd93d04b5373ef4e50b45
branch_name: refs/heads/master
visit_date: 2021-06-21T09:56:00.743015
revision_date: 2017-07-24T19:16:39
committer_date: 2017-07-24T19:16:39
github_id: null
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_created_at: null
gha_updated_at: null
gha_pushed_at: null
gha_language: null
files:
[ { "alpha_fraction": 0.5838414430618286, "alphanum_fraction": 0.6025696992874146, "avg_line_length": 33.76515197753906, "blob_id": "4826cca16301767552248f3c176a4d3d100fa3b1", "content_id": "f3a0a895d2245a4f345834b76eb726b8681b17e2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4592, "license_type": "no_license", "max_line_length": 105, "num_lines": 132, "path": "/udpClient.py", "repo_name": "reeveress/monitor-control-before-july-2017-trip", "src_encoding": "UTF-8", "text": "\n'''\n\tUDP socket server\n'''\n\n\n\n# Redis Monitor Control Database hash structure\n#\n# key status:node:<zero indexed node ID>\n#\n# fields\t temps <array of sensor temperatures>\n#\t humidities <array of humidities> \n# \t \t airflow airflow measured by the node\n# \t \t cpu-uptime uptime of node CPU, in seconds\n\n\n\n\nimport time\nimport datetime\nimport struct\nimport redis\nimport socket\nimport sys\n\n\nPORT = 8888\nlocalAddress = ('10.0.1.224', PORT) # hera-nuc IP\naddressArduino = ('10.1.1.247', PORT) # arduino IP\n\n\n\n\n# Instantiate a Redis object and bind it to an existing running Redis server\nr = redis.StrictRedis()\n\n# Create a UDP socket\ntry:\n\tclient_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n\tprint('Socket created')\nexcept socket.error, msg:\n\tprint('Failed to create socket. Error Code : ' + str(msg[0]) + ' Message ' + str(msg[1]))\n\tsys.exit()\n\n#client_socket.settimeout(1)\n\n\n# Bind socket to local host and port\ntry: \n\tclient_socket.bind(localAddress)\n\tprint('Bound socket')\nexcept socket.error , msg:\n\tprint('Bind failed. Error Code : ' + str(msg[0]) + ' Message ' + msg[1])\n\tsys.exit()\n\n\n\nwhile 1:\n print(\"Collecting Data and pushing to Redis...\")\n\t# Receive data continuously from the server (Arduino in this case)\n\tdata, addr = client_socket.recvfrom(1024)\n\n\t# Arduino sends a Struct via UDP so unpacking is needed \n # struct.unpack returns a tuple with one element\n\tunpacked_mcptemp = struct.unpack('=f',data[0:4])\n\tunpacked_htutemp = struct.unpack('=f', data[4:8])\n\tunpacked_htuhumid = struct.unpack('=f', data[8:12])\n\n# print('Unpack type: ', type(unpacked_mcptemp))\n#\tprint('MCP9808 Temperature: ' ,unpacked_mcptemp[0])\n#\tprint('HTU21DF Temperature: ', unpacked_htutemp[0])\n#\tprint('HTU21DF Humidity: ', unpacked_htuhumid[0])\n \n\t# Set hashes in Redis composed of sensor temperature values\n\tr.hmset('status:node:0', {'tempBot':unpacked_htutemp[0]})\n r.hmset('status:node:0', {'tempMid':unpacked_mcptemp[0]})\n\tr.hmset('status:node:0', {'humidBot':unpacked_htuhumid[0]})\n\n # Set timestamp \n r.hmset('status:node:0', {'timestamp':str(datetime.datetime.now())})\n\n # Check if getTemps flag is set by a mcNode object\n if (r.hmget('status:node:0', 'getTemps') == \"True\"):\n print(\"Inside the if statement\")\n try:\n print('getTemps is...',bool(r.hmget('status:node:0', 'getTemps')))\n print('getTemps is...',type(bool(r.hmget('status:node:0', 'getTemps'))))\n \n client_socket.sendto('getTemps', addressArduino)\n \n time.sleep(2)\n # Arduino checks if it received any udp packets and sends a respons back based on request\n debug_data, addr = client_socket.recvfrom(1024)\n \n unpacked_mcptemp_debug = struct.unpack('=f',debug_data[0:4])\n unpacked_htutemp_debug = struct.unpack('=f', debug_data[4:8])\n\n # print(\"Unpacked Response from Arduino: \", unpacked_mcptemp_debug[0])\n\n r.hmset('status:node:0', {'tempBotDebug':unpacked_htutemp_debug[0]})\n r.hmset('status:node:0', 
{'tempMidDebug':unpacked_mcptemp_debug[0]})\n # print(r.hmget('status:node:0','tempBotDebug'))\n\n # Set timestamp\n r.hmset('status:node:0', {'timestamp':str(datetime.datetime.now())})\n\n # Reset the flag for next request \n r.hmset('status:node:0', {'getTemps':False}) \n print('getTemps after resetting it is...',r.hget('status:node:0','getTemps')) \n \n except:\n\n pass\n print(r.hmget('status:node:0', 'reset'))\n print(type(r.hmget('status:node:0', 'reset')))\n print(len(r.hmget('status:node:0', 'reset')))\n print((r.hmget('status:node:0', 'reset')[0]))\n print(type(r.hmget('status:node:0', 'reset')[0]))\n if (r.hmget('status:node:0', 'reset')[0] == \"True\"):\n print(\"inside reset if\") \n # Reset the microcontroller when reset is sent through the mcNode class\n # might remove this functionality in the future; too much power for the mcNode class\n client_socket.sendto('reset', addressArduino)\n\n time.sleep(2)\n\n # Reset the flag to False so it's not resetting microcontroller to infinity\n r.hmset('status:node:0', {'reset': False})\n \n\n # Set delay before receiving more data\n time.sleep(2)\n\n\n" }, { "alpha_fraction": 0.5020105242729187, "alphanum_fraction": 0.5252087712287903, "avg_line_length": 27.086956024169922, "blob_id": "6e81ede868994dcfc98e91ae30a4db9510b0f1b9", "content_id": "950f28f104104bf9828c0589fb0c56ab466726a1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 3233, "license_type": "no_license", "max_line_length": 100, "num_lines": 115, "path": "/MACburner.ino", "repo_name": "reeveress/monitor-control-before-july-2017-trip", "src_encoding": "UTF-8", "text": "#include <EEPROM.h>\n#include <Ethernet.h>\n#include <EthernetUdp.h>\n\n\n//================ EEPROM Memory Map ================= \n// Address Byte Data(8 bits) Type \n// 0 ---- ---- MAC byte 0 \n// 1 ---- ---- MAC byte 1 \n// 2 ---- ---- MAC byte 2\n// 3 ---- ---- MAC byte 3\n// 4 ---- ---- MAC byte 4\n// 5 ---- ---- MAC byte 5\n// 6 ---- ---- Node ID \n// 7 ---- ---- unassigned\n// 8 ---- ---- unassigned\n// 9 ---- ---- unassigned\n// 10 ---- ---- unassigned\n// . ---- ---- unassigned\n// .. ---- ---- unassigned\n// ... 
---- ---- unassigned\n// 1024 ---- ---- unassigned\n\n\nunsigned int nodeID = 2;\n\nunsigned int eeadr = 0; \nunsigned int eeNodeAdr = 6; // EEPROM addres that will store node ID number\n\nbyte mac[] = {0x00, 0x08, 0xDC, 0x00, 0x02, 0x4f}; //Assign MAC address of the Arduino here\n\nunsigned int localPort = 8888; // Assign a port to talk over\nint packetSize;\n\nEthernetUDP Udp; // UDP object\n\n// For future use; initializing buffer and data variables for receiving packets from the server\nchar packetBuffer[UDP_TX_PACKET_MAX_SIZE];\nString datReq; // String for data\n\n\n\n\nvoid setup() {\n Serial.begin(9600);\n \n // burn MAC to first 6 EEPROM bytes\n for (int i = 0; i < 6; i++){\n EEPROM.write(eeadr, mac[i]);\n ++eeadr;\n }\n \n // burn node ID to the 7th EEPROM byte \n EEPROM.write(eeNodeAdr, nodeID);\n\n // Zero all the other EEPROM cells - 255 is the default in each cell with an off the shelf Arduino\n for (int i = 7; i < 1024; i++){\n EEPROM.write(i,0);\n } \n \n // Print out the contents of EEPROM\n for (int i = 0; i < 8; i++) {\n Serial.println(\"Printing the contents of EEPROM\");\n Serial.print(\"Address: \");\n Serial.print(i);\n Serial.print(\"\\t\");\n Serial.print(\"Data: \");\n Serial.print(EEPROM.read(i));\n Serial.print(\"\\t\");\n\n } \n\n // Start Ethernet connection, automatically tries to get IP using DHCP\n if (Ethernet.begin(mac) == 0) {\n\n Serial.println(\"Failed to configure Ethernet using DHCP\");\n for (;;)\n ;\n }\n Serial.println(\"IP address:\");\n Serial.println(Ethernet.localIP());\n \n // Start UDP\n Udp.begin(localPort);\n delay(1500); // delay to give time for initialization\n\n // Set Pin 4 as the reset pin\n pinMode(4, OUTPUT);\n digitalWrite(4, HIGH);\n\n \n}\n\n\n\n\nvoid loop() {\n // Check if request was sent to Arduino\n packetSize = Udp.parsePacket(); //Reads the packet size\n Serial.println(\"Waiting to receive the reset command..\");\n \n if (packetSize>0) { //if packetSize is >0, that means someone has sent a request\n \n Udp.read(packetBuffer, UDP_TX_PACKET_MAX_SIZE); //Read the data request\n String datReq(packetBuffer); //Convert char array packetBuffer into a string called datReq\n \n if (datReq == \"reset\") {\n \n Serial.println(\"Resetting the microcontroller...\");\n digitalWrite(4, LOW);\n } \n \n }\n\n}\n\n\n\n" }, { "alpha_fraction": 0.7055214643478394, "alphanum_fraction": 0.7239263653755188, "avg_line_length": 22.285715103149414, "blob_id": "7fcd08f03f9b51dd65d60730632070696b309939", "content_id": "d939cc67d09ae7492112665697bc66bd1df88891", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 163, "license_type": "no_license", "max_line_length": 56, "num_lines": 7, "path": "/mcNodeTest.py", "repo_name": "reeveress/monitor-control-before-july-2017-trip", "src_encoding": "UTF-8", "text": "import redis\nimport mcNode\n\nm = mcNode.mcNode()\nprint(\"getTemp method output: \", m.getTemp(0))\n#print(\"getTempDebug method ouput: \", m.getTempDebug(0))\nm.reset(0)\n" }, { "alpha_fraction": 0.5903366804122925, "alphanum_fraction": 0.6072989106178284, "avg_line_length": 38.693878173828125, "blob_id": "27cce135d897d34311185ceca981fb7cb1a7fc52", "content_id": "c670c034950ecd63d61704e6c0dae27a69b09c17", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3891, "license_type": "no_license", "max_line_length": 135, "num_lines": 98, "path": "/udpClientReceiver.py", "repo_name": "reeveress/monitor-control-before-july-2017-trip", 
"src_encoding": "UTF-8", "text": "\"\"\"\nThis class is used for receiving the UDP packets from the Arduino.\n\nIt goes into an infinite while loop so has only the packet receiving functionality. \n\"\"\"\n\n\n\nimport time\nimport datetime\nimport struct\nimport redis\nimport socket\nimport sys\nimport smtplib\n\n# Define IP address of the Redis server host machine\nserverAddress = '10.28.1.207'\n\n# Define PORT for socket creation\nPORT = 8888\n\n\nclass UdpClient():\n\n\n def __init__(self):\n\n # define socket address for binding; necessary for receiving data from Arduino \n self.localSocket = (serverAddress, PORT)\n\n\n # Instantiate redis object connected to redis server running on serverAddress\n self.r = redis.StrictRedis(serverAddress)\n\n # Create a UDP socket\n try:\n self.client_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n self.client_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n print('Socket created')\n except socket.error, msg:\n print('Failed to create socket. Error Code : ' + str(msg[0]) + ' Message ' + str(msg[1]))\n sys.exit()\n\n\n # Bind socket to local host and port\n try:\n self.client_socket.bind(self.localSocket)\n print('Bound socket')\n except socket.error , msg:\n print('Bind failed. Error Code : ' + str(msg[0]) + ' Message ' + msg[1])\n sys.exit()\n\n # Make a server object to send alerts by email\n #server = smtplib.SMTP('smtp.gmail.com', 587)\n #server.login('[email protected]','monitorcontrol')\n #server.ehlo()\n #server.starttls()\n\n def receiveUDP(self):\n \"\"\"\n Goes into an infinite while loop to grab UDP packets.\n \"\"\"\n\n # Loop to grap UDP packets from Arduino and push to Redis\n while 1:\n\n # Receive data continuously from the server (Arduino in this case)\n data, addr = self.client_socket.recvfrom(1024)\n\n # Arduino sends a Struct via UDP so unpacking is needed \n # struct.unpack returns a tuple with one element\n # Each struct element is 4 Bytes (c floats are packed as 4 byte strings)\n\n unpacked_nodeID = struct.unpack('=f',data[0:4])\n unpacked_mcptemp_top = struct.unpack('=f',data[4:8])\n unpacked_mcptemp_mid = struct.unpack('=f',data[8:12])\n unpacked_htutemp = struct.unpack('=f', data[12:16])\n unpacked_htuhumid = struct.unpack('=f', data[16:20])\n unpacked_windspeed_mph = struct.unpack('=f', data[20:24])\n unpacked_tempCairflow = struct.unpack('=f', data[24:28])\n unpacked_serial = struct.unpack('=B',data[28])\n node = int(unpacked_nodeID[0])\n\n # if (unpacked_mcptemp_top > 27 && unpacked_mcptemp_mid > 27 && unpacked_htutemp > 27):\n #server.send('[email protected]','[email protected]','The temperature values are approaching critical levels, shutdown sequence initiated') \n # Set hashes in Redis composed of sensor temperature values\n self.r.hmset('status:node:%d'%node, {'tempTop':unpacked_mcptemp_top[0]})\n self.r.hmset('status:node:%d'%node, {'tempMid':unpacked_mcptemp_mid[0]})\n self.r.hmset('status:node:%d'%node, {'humidTemp':unpacked_htutemp[0]})\n self.r.hmset('status:node:%d'%node, {'humid':unpacked_htuhumid[0]})\n self.r.hmset('status:node:%d'%node, {'windSpeed_mph':unpacked_windspeed_mph[0]})\n self.r.hmset('status:node:%d'%node, {'tempCairflow':unpacked_tempCairflow[0]})\n \n # Set timestamp \n self.r.hmset('status:node:%d'%node, {'timestamp':str(datetime.datetime.now())})\n self.r.hmset('status:node:%d'%node, {'serial': unpacked_serial[0]})\n print('status:node:%d'%node,self.r.hgetall('status:node:%d'%node))\n\n" }, { "alpha_fraction": 0.6156171560287476, "alphanum_fraction": 
0.6272040009498596, "avg_line_length": 31.52458953857422, "blob_id": "d29653ce693ce13a90c2fda0d05db3c17d26e2a1", "content_id": "637de636d30e60767fe4d5bca3037650e78f81bc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1985, "license_type": "no_license", "max_line_length": 188, "num_lines": 61, "path": "/mcNode.py", "repo_name": "reeveress/monitor-control-before-july-2017-trip", "src_encoding": "UTF-8", "text": "'''\n\tmonitor control database class\n'''\nimport redis\nimport time\n \nHOST = '10.0.1.224'\nPORT = 6379\n\n\n\nclass mcNode():\n def __init__(self):\n \n \n # Class object init makes a connection with our 1U server to grap redis database values\n # Redis bind to port 6379 by default\n\tself.r = redis.StrictRedis(host = HOST)\n\n # Returns a dict of temperature sensors\n def getTemp(self,node):\n \n redistime = self.r.hmget(\"status:node:%d\"%node, \"timestamp\")[0] \n timestamp = {'timestamp': redistime}\n tempBot = float((self.r.hmget(\"status:node:%d\"%node,\"tempBot\"))[0])\n tempMid = float((self.r.hmget(\"status:node:%d\"%node,\"tempMid\"))[0])\n temps = {'timestamp':timestamp,'tempBot':tempBot,'tempMid':tempMid}\n return temps\n\n\n\n def getHumid(self,node):\n return self.r.hmget(\"status:node:%d\"%node,\"humidities\")\n\n\n\n\n def getAir(self,node):\n return self.r.hmget(\"status:node:%d\"%node,\"airflow\")\n\n\n\n\n# def getTempDebug(self,node):\n# # Set getTemps hash field to True. \n# self.r.hset(\"status:node:%d\"%node,\"getTemps\",True)\n# time.sleep(5)\n# tempBotDebug = float(self.r.hmget(\"status:node:%d\"%node, \"tempBot\")[0])\n# tempMidDebug = float(self.r.hmget(\"status:node:%d\"%node, \"tempMidDebug\")[0])\n# timestampDebug = self.r.hmget(\"status:node:%d\"%node, \"timestampDebug\")[0]\n# \n# # Return a dictionary of float values and string timestamp\n# tempsDebug = {'timestamp':timestampDebug,'tempBotDebug':tempBotDebug, 'tempMidDebug':tempMidDebug}\n# return tempsDebug\n#\n# def reset(self,node):\n# self.r.hset(\"status:node:%d\"%node,\"reset\",True)\n# print(\"Set reset flag to True\")\n# return \n# #def accumulate(self):\n\t# accumulates specified number of data or for specified period of time, saves to a file, maybe a plot script. Would be cool if I had a real time data upload to a server with cool graphix. 
\n" }, { "alpha_fraction": 0.8243243098258972, "alphanum_fraction": 0.8243243098258972, "avg_line_length": 23.66666603088379, "blob_id": "813f49139e226125b1572ebae6de8f4218a54677", "content_id": "92fe820cba1ad0c08f4ac67789c04bb311e05acc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 74, "license_type": "no_license", "max_line_length": 33, "num_lines": 3, "path": "/udpClientReceiverTest.py", "repo_name": "reeveress/monitor-control-before-july-2017-trip", "src_encoding": "UTF-8", "text": "import udpClientReceiver\nu = udpClientReceiver.UdpClient()\nu.receiveUDP()\n" }, { "alpha_fraction": 0.5822820663452148, "alphanum_fraction": 0.6105253100395203, "avg_line_length": 28.01912498474121, "blob_id": "779980684d5ee8a824ec302c957f96d9c75faab2", "content_id": "45b8bc1b3e330f93f803dceb672e823e40577403", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 10622, "license_type": "no_license", "max_line_length": 154, "num_lines": 366, "path": "/mc_arduino.ino", "repo_name": "reeveress/monitor-control-before-july-2017-trip", "src_encoding": "UTF-8", "text": "#include <ICMPPing.h>\n#include <util.h>\n#include <Adafruit_SleepyDog.h>\n#include <EEPROM.h>\n#include <Ethernet.h>\n#include <EthernetUdp.h>\n#include <SPI.h>\n#include <Adafruit_MCP9808.h>\n#include <Adafruit_HTU21DF.h>\n\n\n//================ EEPROM Memory Map ================= \n// Address Byte Data(8 bits) Type \n// 0 ---- ---- MAC byte 0 \n// 1 ---- ---- MAC byte 1 \n// 2 ---- ---- MAC byte 2\n// 3 ---- ---- MAC byte 3\n// 4 ---- ---- MAC byte 4\n// 5 ---- ---- MAC byte 5\n// 6 ---- ---- Node ID \n// 7 ---- ---- unassigned\n// 8 ---- ---- unassigned\n// 9 ---- ---- unassigned\n// 10 ---- ---- unassigned\n// . ---- ---- unassigned\n// .. ---- ---- unassigned\n// ... ---- ---- unassigned\n// 1024 ---- ---- unassigned\n\n\n\n\n// I2C addresses for the two MCP9808 temperature sensors\n#define TEMP_TOP 0x1A\n#define TEMP_MID 0x1B\n\n\nIPAddress serverIp(10, 0, 1, 224); // Server ip address\nEthernetClient client; // client object\nEthernetUDP Udp; // UDP object\n\nSOCKET pingSocket = 1; //Socket for pinging the server to monitor network connectivity; Socket 0 works for pinging but breaks the EthernetUDP\nICMPPing ping(pingSocket, (uint16_t)random(0, 255));\n\nunsigned int localPort = 8888; // Assign a port to talk over\nint packetSize;\n\n// For future use; initializing buffer and data variables for receiving packets from the server\nchar packetBuffer[UDP_TX_PACKET_MAX_SIZE];\nString datReq; // String for data\n\nbyte mac[6];\n\nunsigned int EEPROM_SIZE = 1024;\nunsigned int eeadr = 0; // MACburner.bin writes MAC addres to the first 6 addresses of EEPROM\nunsigned int eeNodeAdr = 6; // EEPROM node ID address\n\n\n// Sensor objects\nAdafruit_MCP9808 mcpTop = Adafruit_MCP9808(); \nAdafruit_MCP9808 mcpMid = Adafruit_MCP9808(); \nAdafruit_HTU21DF htu = Adafruit_HTU21DF();\n\n\n\n// Wind Sensor\n#define analogPinForRV 1 // change to pins you the analog pins are using\n#define analogPinForTMP 0\n\n// to calibrate your sensor, put a glass over it, but the sensor should not be\n// touching the desktop surface however.\n// adjust the zeroWindAdjustment until your sensor reads about zero with the glass over it. 
\n\nconst float zeroWindAdjustment = -.4; // negative numbers yield smaller wind speeds and vice versa.\n\nint TMP_Therm_ADunits; //temp termistor value from wind sensor\nfloat RV_Wind_ADunits; //RV output from wind sensor \nfloat RV_Wind_Volts;\nint TempCtimes100;\nfloat zeroWind_ADunits;\nfloat zeroWind_volts;\n\n\n\n// struct for a UDP packet\nstruct sensors {\n float nodeID;\n float mcpTempTop;\n float mcpTempMid;\n float htuTemp;\n float htuHumid;\n float windSpeed_MPH;\n float tempCAirflow;\n\n} sensorArray;\n\n\n\nvoid setup() {\n\n Watchdog.disable(); // Disable Watchdog so it doesn't get into infinite reset loop\n \n // Initialize Serial for error message output and debugging\n Serial.begin(57600);\n Serial.println(\"Running Setup...\");\n \n \n // Read MAC address from EEPROM (burned previously with MACburner.bin sketch)\n for (int i = 0; i < 6; i++){\n mac[i] = EEPROM.read(eeadr);\n ++eeadr;\n }\n \n // Read node ID from EEPROM (burned with MACburner.bin sketch) and assign it to struct nodeID member\n sensorArray.nodeID = EEPROM.read(eeNodeAdr);\n \n \n \n for (int i = 0; i < 8; i++) {\n Serial.println(\"Printing the contents of EEPROM\");\n Serial.print(\"Address: \");\n Serial.print(i);\n Serial.print(\"\\t\");\n Serial.print(\"Data: \");\n Serial.print(EEPROM.read(i));\n Serial.print(\"\\t\");\n }\n \n \n // Setting pins appropriately. Very important to first write LOW to digital pins \n // because setting the pin as OUTPUT changes it's state and has caused problems with the reset pin 4 before\n \n // PSU pin\n digitalWrite(2, LOW);\n pinMode(2, OUTPUT);\n digitalWrite(2, LOW);\n // White Rabbit 5V pin\n digitalWrite(3, LOW);\n pinMode(3, OUTPUT);\n digitalWrite(3,LOW);\n // FEM VAC pin\n digitalWrite(5, LOW);\n pinMode(5, OUTPUT);\n digitalWrite(5, LOW);\n // PAM VAC pin\n digitalWrite(6, LOW);\n pinMode(6, OUTPUT);\n digitalWrite(6, LOW);\n // reset pin\n digitalWrite(4, HIGH);\n pinMode(4, OUTPUT); \n digitalWrite(4, HIGH);\n\n \n\n // Start Ethernet connection, automatically tries to get IP using DHCP\n if (Ethernet.begin(mac) == 0) {\n Serial.println(\"Failed to configure Ethernet using DHCP, restarting sketch...\");\n delay(10000);\n }\n Serial.println(\"Configured IP:\");\n Serial.println(Ethernet.localIP());\n \n \n // Start UDP\n Udp.begin(localPort);\n Serial.println(\"UDP initialized!\");\n delay(1500); // delay to give time for initialization\n\n \n // Enable Watchdog for 8 seconds\n Watchdog.enable(8000);\n Serial.println(\"Watchdog enabled\");\n\n // Checking if HTU21DF temp and humidity sensor\n if (!htu.begin()) {\n Serial.println(\"Couldn't find HTU21DF!\");\n //Serial.println(\"Resetting the Microcontroller until the sensor is back online\");\n delay(10000);\n }\n Watchdog.reset();\n \n \n if (!mcpTop.begin(TEMP_TOP)) {\n Serial.println(\"Couldn't find MCP9808 TOP! Restarting sketch...\");\n delay(10000);\n }\n Watchdog.reset();\n \n\n if (!mcpMid.begin(TEMP_MID)) {\n Serial.println(\"Couldn't find MCP9808 MID! 
Restarting sketch...\");\n delay(10000);\n }\n Watchdog.reset();\n \n}\n\n\n \nvoid loop() {\n \n ICMPEchoReply echoReply = ping(serverIp, 4); //takes about 7295 ms to fail\n// char buffer [256];\n// Serial.println(echoReply.status);\n// if (echoReply.status == SUCCESS)\n// {\n// \n// sprintf(buffer,\n// \"Reply[%d] from: %d.%d.%d.%d: bytes=%d time=%ldms TTL=%d\",\n// echoReply.data.seq,\n// echoReply.addr[0],\n// echoReply.addr[1],\n// echoReply.addr[2],\n// echoReply.addr[3],\n// REQ_DATASIZE,\n// millis() - echoReply.data.time,\n// echoReply.ttl);\n// }\n// else\n// {\n// sprintf(buffer, \"Echo request failed; %d\", echoReply.status);\n// }\n// Serial.println(buffer);\n// delay(500);\n\n \n if (echoReply.status == SUCCESS){\n Serial.println(\"Server ping successful!\");\n Watchdog.reset();\n sensorArray.mcpTempTop = mcpTop.readTempC();\n delay(2000);\n Watchdog.reset();\n sensorArray.mcpTempMid = mcpMid.readTempC();\n delay(2000);\n Watchdog.reset();\n \n \n // Read and send humidity and temperature from HTU21DF sensor and send as UDP\n sensorArray.htuTemp = htu.readTemperature();\n sensorArray.htuHumid = htu.readHumidity();\n \n \n // Wind Sensor\n TMP_Therm_ADunits = analogRead(analogPinForTMP);\n RV_Wind_ADunits = analogRead(analogPinForRV);\n RV_Wind_Volts = (RV_Wind_ADunits * 0.0048828125);\n\n // these are all derived from regressions from raw data as such they depend on a lot of experimental factors\n // such as accuracy of temp sensors, and voltage at the actual wind sensor, (wire losses) which were unaccouted for.\n TempCtimes100 = (0.005 *((float)TMP_Therm_ADunits * (float)TMP_Therm_ADunits)) - (16.862 * (float)TMP_Therm_ADunits) + 9075.4; \n sensorArray.tempCAirflow = TempCtimes100/100.0;\n zeroWind_ADunits = -0.0006*((float)TMP_Therm_ADunits * (float)TMP_Therm_ADunits) + 1.0727 * (float)TMP_Therm_ADunits + 47.172; // 13.0C 553 482.39\n\n zeroWind_volts = (zeroWind_ADunits * 0.0048828125) - zeroWindAdjustment; \n\n // This from a regression from data in the form of \n // Vraw = V0 + b * WindSpeed ^ c\n // V0 is zero wind at a particular temperature\n // The constants b and c were determined by some Excel wrangling with the solver.\n \n sensorArray.windSpeed_MPH = pow(((RV_Wind_Volts - zeroWind_volts) /.2300) , 2.7265); \n \n //Serial.print(\" TMP volts \");\n //Serial.print(TMP_Therm_ADunits * 0.0048828125);\n \n //Serial.print(\" RV volts \");\n //Serial.print((float)RV_Wind_Volts);\n\n //Serial.print(\"TempC\");\n //Serial.print(sensorArray.tempCAirflow);\n\n //Serial.print(\" ZeroWind volts \");\n //Serial.print(zeroWind_volts);\n\n // Serial.print(\" WindSpeed MPH \");\n // Serial.println(sensorArray.windSpeed_MPH);\n \n \n \n \n \n // Send UDP packet to the server ip address serverIp that's listening on port localPort\n Udp.beginPacket(serverIp, localPort); // Initialize the packet send\n Udp.write((byte *)&sensorArray, sizeof sensorArray); // Send the struct as UDP packet\n Udp.endPacket(); // End the packet\n Serial.println(\"UDP packet sent...\");\n Watchdog.reset(); \n \n // Clear UDP packet buffer before sending another packet\n memset(packetBuffer, 0, UDP_TX_PACKET_MAX_SIZE);\n ;\n \n \n // Check if request was sent to Arduino\n packetSize = Udp.parsePacket(); //Reads the packet size\n \n if(packetSize>0) { //if packetSize is >0, that means someone has sent a request\n \n Udp.read(packetBuffer, UDP_TX_PACKET_MAX_SIZE); //Read the data request\n String datReq(packetBuffer); //Convert char array packetBuffer into a string called datReq\n Serial.println(\"Contents of 
the packetBuffer: \");\n Serial.println(packetBuffer);\n Serial.println(\"datReq:\");\n Serial.println(datReq);\n \n if (datReq == \"PSU_on\") {\n digitalWrite(2, HIGH);\n } \n \n else if (datReq == \"PSU_off\") {\n digitalWrite(2, LOW);\n }\n \n else if (datReq == \"WR_on\") {\n digitalWrite(3, HIGH);\n }\n \n else if (datReq == \"WR_off\") {\n digitalWrite(3, LOW);\n }\n \n else if (datReq == \"FEM_on\") {\n digitalWrite(5, HIGH);\n }\n \n else if (datReq == \"FEM_off\") {\n digitalWrite(5, LOW);\n }\n \n else if (datReq == \"PAM_on\") {\n Serial.println(\"Setting pin 6 HIGH\");\n delay(100);\n digitalWrite(6, HIGH);\n }\n \n else if (datReq == \"PAM_off\") {\n digitalWrite(6, LOW);\n }\n \n \n else if (datReq == \"reset\") {\n Serial.println(\"Resetting the microcontroller...\");\n digitalWrite(4, LOW);\n }\n \n }\n\n //clear out the packetBuffer array\n memset(packetBuffer, 0, UDP_TX_PACKET_MAX_SIZE); \n \n // Renew DHCP lease - times out eventually if this is removed\n Ethernet.maintain();\n\n // stroke the watchdog just in case\n Watchdog.reset();\n \n \n \n }\n else {\n Serial.println(\"Server ping unsuccessful, restarting sketch...\");\n delay(10000);\n }\n \n}\n\n" } ]
num_files: 7

repo_name: Global-localhost/mtq
repo_url: https://github.com/Global-localhost/mtq
snapshot_id: 04a251f225af84f8159be52f936f7ed1d6ae73d9
revision_id: 9c7ee2f2495852844eaeaab190a9d587c1a9a9e2
directory_id: 473df4d5963b21bc0c2cf26900091eb88109c68a
branch_name: refs/heads/master
visit_date: 2022-12-03T21:27:48.613578
revision_date: 2013-10-26T14:38:27
committer_date: 2013-10-26T14:38:27
github_id: null
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_created_at: null
gha_updated_at: null
gha_pushed_at: null
gha_language: null
files:
[ { "alpha_fraction": 0.5062987208366394, "alphanum_fraction": 0.5078984498977661, "avg_line_length": 26.45856285095215, "blob_id": "a5a34b67fed1205e6f836d0f806705f4b6b9138a", "content_id": "e089af5de4f90df7a0061af44219263dcc408331", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5001, "license_type": "no_license", "max_line_length": 114, "num_lines": 181, "path": "/mtq/schedule.py", "repo_name": "Global-localhost/mtq", "src_encoding": "UTF-8", "text": "'''\nCreated on Aug 5, 2013\n\n@author: sean\n'''\nfrom dateutil.rrule import rrulestr\nimport time\nfrom threading import Timer, Lock\nfrom mtq.utils import now\nfrom functools import wraps\nimport logging\n\ndef sync(func):\n @wraps(func)\n def sync_decorator(self, *args, **kwargs):\n with self.lock:\n return func(self, *args, **kwargs)\n \n return sync_decorator\n\nclass Rule(object):\n def __init__(self, factory, doc):\n \n self.logger = logging.getLogger('mtq.Rule')\n\n self.lock = Lock()\n self.factory = factory\n self.doc = doc\n self.timer = None\n \n self.init_rule()\n \n def init_rule(self):\n doc = self.doc\n self.rrule = rrulestr(doc['rule'], dtstart=now())\n self.irule = iter(self.rrule)\n self.queue = self.factory.queue(doc['queue'], tags=doc['tags'])\n \n if self.timer is not None:\n self.set()\n \n def set(self):\n\n if self.timer is not None:\n self.timer.cancel()\n self.timer = None\n\n nw = now()\n self.nxt = nxt = next(self.irule, None)\n \n if nxt is None:\n self.logger.info('No more jobs to process, exiting')\n return\n \n if nxt < nw:\n timeout = 0\n else:\n timeout = (nxt - nw).seconds\n \n self.logger.info('Scheduling task \"%s\" to enqueue in %i seconds (%s)' % (self.task, timeout, nxt.ctime()))\n self.timer = t = Timer(timeout, self.execute_task)\n t.start()\n \n @property\n def task(self):\n return self.doc['task']\n \n @sync\n def execute_task(self):\n self.prev = self.nxt\n self.nxt = self.timer = None\n self.logger.info('Enquing task %s', self.task)\n self.queue.enqueue(self.task)\n self.set()\n \n def cancel(self):\n if self.timer:\n self.timer.cancel()\n @sync \n def refresh(self):\n last_modified = self.doc['modified']\n collection = self.factory.schedule_collection\n doc = collection.find_one({'_id':self.id})\n if doc is None:\n if self.timer:\n self.timer.cancel()\n return True\n \n if doc['modified'] > last_modified:\n self.logger.info('Task %s modified, updating' % (self.id))\n self.doc = doc\n self.init_rule()\n \n return False\n\n @property\n def id(self):\n return self.doc['_id']\n \nclass Scheduler(object):\n \n def __init__(self, factory):\n self.factory = factory\n \n self.logger = logging.getLogger('mtq.Scheduler')\n\n \n def add_job(self, rule, task, queue, tags=()):\n collection = self.factory.schedule_collection\n return collection.insert({'rule':rule, 'task':task, 'queue':queue, 'tags':tags,\n 'paused':False, 'active':True, 'modified':now()})\n\n def remove_job(self, _id):\n collection = self.factory.schedule_collection\n return collection.remove({'_id':_id})\n \n def update_job(self, _id, rule=None, task=None, queue=None, tags=None):\n collection = self.factory.schedule_collection\n query = {'_id':_id}\n doc = {'$set':{'modified':now()}}\n if rule is not None:\n doc['$set']['rule'] = rule\n if task is not None:\n doc['$set']['task'] = task\n if queue is not None:\n doc['$set']['queue'] = queue\n if tags is not None:\n doc['$set']['tags'] = tags\n \n return collection.update(query, doc)\n\n \n @property\n def jobs(self):\n collection 
= self.factory.schedule_collection\n return collection.find({'paused':False, 'active':True})\n \n def update_rules(self):\n pass\n \n def init_rules(self):\n \n self.rules = rules = set()\n \n for job in self.jobs:\n rule = Rule(self.factory, job)\n rules.add(rule)\n \n for r in rules:\n r.set()\n \n def refresh_rules(self):\n to_remove = set()\n for r in self.rules:\n should_remove = r.refresh()\n if should_remove:\n to_remove.add(r)\n \n for r in to_remove:\n self.rules.discard(r)\n \n _ids = {r.id for r in self.rules}\n for doc in self.jobs:\n if doc['_id'] not in _ids:\n rule = Rule(self.factory, doc)\n self.rules.add(rule)\n rule.set()\n \n def run(self):\n self.logger.info('Running Scheduler')\n self.init_rules()\n \n try:\n while 1:\n time.sleep(1)\n self.refresh_rules()\n except KeyboardInterrupt as err:\n self.logger.exception('good bye!')\n finally:\n for r in self.rules:\n r.cancel()\n \n \n \n" } ]
num_files: 1

repo_name: chunnlp/text_gen
repo_url: https://github.com/chunnlp/text_gen
snapshot_id: a8ce92f94545e4cd5509a3ba0db5371985ccdefc
revision_id: b4ee8fe1ef01c2fe9ad981365111bdfb636e70e7
directory_id: 9d83ea67d629535d1a8cc53f159bbf25c9378997
branch_name: refs/heads/master
visit_date: 2021-08-22T21:10:57.780293
revision_date: 2017-12-01T00:19:38
committer_date: 2017-12-01T00:19:38
github_id: 112710238
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_created_at: null
gha_updated_at: null
gha_pushed_at: null
gha_language: null
files:
[ { "alpha_fraction": 0.5655034184455872, "alphanum_fraction": 0.5745688080787659, "avg_line_length": 33.625, "blob_id": "5b67b781493b4553458fab0f6cc1fc9a4a7ce24c", "content_id": "08272501343e16ff1f94d35748d14fade0d09e02", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 12465, "license_type": "permissive", "max_line_length": 98, "num_lines": 360, "path": "/text_generator.py", "repo_name": "chunnlp/text_gen", "src_encoding": "UTF-8", "text": "import time\n\nimport numpy as np\nimport tensorflow as tf\nimport data_reader\n\nfrom tensorflow.python.client import device_lib\n\nflags = tf.flags\nlogging = tf.logging\n\nflags.DEFINE_string('model', 'medium', 'model config')\nflags.DEFINE_string('data_path', 'data', 'path to data')\nflags.DEFINE_string('save_path', 'model', 'path to save model')\nflags.DEFINE_integer('num_gpus', 1, 'number of gpus')\nflags.DEFINE_string('rnn_mode', None, 'rnn type')\nflags.DEFINE_string('mode', 'train', 'train or test')\n\nFLAGS = flags.FLAGS\nBASIC = 'basic'\nCUDNN = 'cudnn'\nBLOCK = 'block'\n\n\nclass DataInput(object):\n def __init__(self, config, data, name=None):\n self.batch_size = batch_size = config.batch_size\n self.num_steps = num_steps = config.num_steps\n self.epoch_size = ((len(data) // batch_size) - 1) // num_steps\n self.input_data, self.targets = reader.ptb_producer(\n data, batch_size, num_steps, name=name)\n\n\nclass Model(object):\n def __init__(self, is_training, config, input_, graph):\n self._is_training = is_training\n self._input = input_\n self._rnn_params = None\n self._cell = None\n self.batch_size = input_.batch_size\n self.num_steps = input_.num_steps\n hidden_size = config.hidden_size\n vocab_size = config.vocab_size\n self.graph = graph\n\n with self.graph.as_default():\n with tf.device('/cpu:0'):\n embedding = tf.get_variable(\n 'embedding', [vocab_size, hidden_size], dtype=tf.float32)\n inputs = tf.nn.embedding_lookup(embedding, input_.input_data)\n\n if is_training and config.keep_prob < 1:\n inputs = tf.nn.dropout(inputs, config.keep_prob)\n\n output, state = self._build_rnn_graph(inputs, config, is_training)\n\n softmax_w = tf.get_variable(\n 'softmax_w', [hidden_size, vocab_size], dtype=tf.float32)\n softmax_b = tf.get_variable('softmax_b', [vocab_size], dtype=tf.float32)\n logits = tf.nn.xw_plus_b(output, softmax_w, softmax_b)\n logits = tf.reshape(logits, [self.batch_size, self.num_steps, vocab_size])\n\n loss = tf.contrib.seq2seq.sequence_loss(\n logits,\n input_.targets,\n tf.ones([self.batch_size, self.num_steps], dtype=tf.float32),\n average_across_timesteps=False,\n average_across_batch=True)\n\n self._cost = tf.reduce_sum(loss)\n self._final_state = state\n\n if not is_training:\n return\n\n self._lr = tf.Variable(0., trainable=False)\n tvars = tf.trainable_variables()\n grads, _ = tf.clip_by_global_norm(tf.gradients(self._cost, tvars),\n config.max_grad_norm)\n optimizer = tf.train.GradientDescentOptimizer(self._lr)\n self._train_op = optimizer.apply_gradients(\n zip(grads, tvars),\n global_step=tf.train.get_or_create_global_step())\n\n self._new_lr = tf.placeholder(\n tf.float32, shape=[], name='new_learning_rate')\n self._lr_update = tf.assign(self._lr, self._new_lr)\n\n self.saver = tf.train.Saver(tf.global_variables())\n\n def _get_lstm_cell(self, config, is_training):\n if config.rnn_mode == BASIC:\n return tf.contrib.rnn.BasicLSTMCell(\n config.hidden_size, forget_bias=0., state_is_tuple=True,\n reuse=not is_training)\n if config.rnn_mode == 
BLOCK:\n return tf.contrib.rnn.LSTMBlockCell(\n config.hidden_size, forget_bias=0.)\n raise ValueError('rnn_mode {} not supported'.format(config.rnn_mode))\n\n\n def _build_rnn_graph(self, inputs, config, is_training):\n def make_cell():\n cell = self._get_lstm_cell(config, is_training)\n if is_training and config.keep_prob < 1:\n cell = tf.contrib.rnn.DropoutWrapper(\n cell, output_keep_prob=config.keep_prob)\n return cell\n\n cell = tf.contrib.rnn.MultiRNNCell(\n [make_cell() for _ in range(config.num_layers)], state_is_tuple=True)\n\n self._initial_state = cell.zero_state(config.batch_size, tf.float32)\n state = self._initial_state\n\n outputs = []\n with tf.variable_scope('RNN'):\n for time_step in range(self.num_steps):\n if time_step > 0: tf.get_variable_scope().reuse_variables()\n (cell_output, state) = cell(inputs[:, time_step, :], state)\n outputs.append(cell_output)\n output = tf.reshape(tf.concat(outputs, 1), [-1, config.hidden_size])\n return output, state\n\n\n def assign_lr(self, session, lr_value):\n session.run(self._lr_update, feed_dict={self._new_lr: lr_value})\n\n\n def with_prefix(self, prefix, name):\n return '/'.join((prefix, name))\n\n\n def export_ops(self, name):\n self._name = name\n ops = {self.with_prefix(self._name, 'cost'): self._cost}\n if self._is_training:\n ops.update(lr=self._lr, new_lr=self._new_lr, lr_update=self._lr_update)\n if self._rnn_params:\n ops.update(rnn_params=self._rnn_params)\n for name, op in ops.items():\n tf.add_to_collection(name, op)\n self._initial_state_name = self.with_prefix(self._name, 'initial')\n self._final_state_name = self.with_prefix(self._name, 'final')\n for state_tuple in self._initial_state:\n tf.add_to_collection(self._initial_state_name, state_tuple.c)\n tf.add_to_collection(self._initial_state_name, state_tuple.h)\n for state_tuple in self._final_state:\n tf.add_to_collection(self._final_state_name, state_tuple.c)\n tf.add_to_collection(self._final_state_name, state_tuple.h)\n\n\n def import_state_tuples(self, state_tuples, name, num_replicas):\n restored = []\n for i in range(len(state_tuples) * num_replicas):\n c = tf.get_collection_ref(name)[2 * i + 0]\n h = tf.get_collection_ref(name)[2 * i + 1]\n restored.append(tf.contrib.rnn.LSTMStateTuple(c, h))\n return tuple(restored)\n\n\n def import_ops(self):\n if self._is_training:\n self._train_op = tf.get_collection_ref('train_op')[0]\n self._lr = tf.get_collection_ref('lr')[0]\n self._new_lr = tf.get_collection_ref('new_lr')[0]\n self._lr_update = tf.get_collection_ref('lr_update')[0]\n rnn_params = tf.get_collection_ref('rnn_params')\n if self._cell and rnn_params:\n params_saveable = tf.contrib.cudnn_rnn.RNNParamsSaveable(\n self._cell,\n self._cell.params_to_canonical,\n self._cell.canonical_to_params,\n rnn_params,\n base_variable_scope='Model/RNN')\n tf.add_to_collection(tf.GraphKeys.SAVEABLE_OBJECTS, params_saveable)\n self._cost = tf.get_collection_ref(self.with_prefix(self._name, 'cost'))[0]\n num_replicas = FLAGS.num_gpus if self._name == 'Train' else 1\n self._initial_state = self.import_state_tuples(\n self._initial_state, self._initial_state_name, num_replicas)\n self._final_state = self.import_state_tuples(\n self._final_state, self._final_state_name, num_replicas)\n\n\n @property\n def input(self):\n return self._input\n\n\n @property\n def initial_state(self):\n return self._initial_state\n\n\n @property\n def cost(self):\n return self._cost\n\n\n @property\n def final_state(self):\n return self._final_state\n\n\n @property\n def lr(self):\n return 
self._lr\n\n\n @property\n def train_op(self):\n return self._train_op\n\n\n @property\n def initial_state_name(self):\n return self._initial_state_name\n\n\n @property\n def final_state_name(self):\n return self._final_state_name\n\n\nclass MediumConfig(object):\n init_scale = 0.05\n learning_rate = 1.\n max_grad_norm = 5\n num_layers = 2\n num_steps = 35\n hidden_size = 650\n max_epoch = 6\n max_max_epoch = 39\n keep_prob = 0.5\n lr_decay = 0.8\n batch_size = 20\n vocab_size = 10000\n rnn_mode = BLOCK\n\n\nclass LargeConfig(object):\n init_scale = 0.04\n learning_rate = 1.\n max_grad_norm = 10\n num_layers = 2\n num_steps = 35\n hidden_size = 1500\n max_epoch = 14\n max_max_epoch = 55\n keep_prob = 0.35\n lr_decay = 1 / 1.15\n batch_size = 20\n vocab_size = 10000\n rnn_mode = BLOCK\n\n\ndef run_epoch(session, model, eval_op=None, verbose=False):\n start_time = time.time()\n costs = 0.\n iters = 0\n state = session.run(model.initial_state)\n\n fetches = {\n 'cost': model.cost,\n 'final_state': model.final_state\n }\n\n if eval_op is not None:\n fetches['eval_op'] = eval_op\n\n for step in range(model.input.epoch_size):\n feed_dict = {}\n for i, (c, h) in enumerate(model.initial_state):\n feed_dict[h] = state[i].c\n feed_dict[c] = state[i].h\n\n vals = session.run(fetches, feed_dict)\n cost = vals['cost']\n state = vals['final_state']\n\n costs += cost\n iters += model.input.num_steps\n\n if verbose and step % (model.input.epoch_size // 10) == 10:\n print('{:.3f} perplexity: {:.3f} speed: {:.0f} wps'.format(\n step * 1. / model.input.epoch_size, np.exp(costs / iters),\n iters * model.input.batch_size * max(1, FLAGS.num_gpus) / (time.time() - start_time)))\n\n return np.exp(costs / iters)\n\n\ndef get_config():\n config = None\n if FLAGS.model == 'medium':\n config = MediumConfig()\n elif FLAGS.model == 'large':\n config = LargeConfig()\n else:\n raise ValueError('Invalid model: {}'.format(FLAGS.model))\n if FLAGS.rnn_mode:\n config.rnn_mode = FLAGS.rnn_mode\n if FLAGS.num_gpus != 1 or tf.__version__ < '1.3.0':\n config.rnn_mode = BASIC\n return config\n\n\ndef main(_):\n if not FLAGS.data_path:\n raise ValueError('data_path must be set')\n gpus = [\n x.name for x in device_lib.list_local_devices() if x.device_type == 'GPU'\n ]\n\n if FLAGS.num_gpus > len(gpus):\n raise ValueError('Invalid num_gpus')\n\n raw_data = reader.ptb_raw_data(FLAGS.data_path)\n train_data, valid_data, test_data, _ = raw_data\n\n config = get_config()\n eval_config = get_config()\n eval_config.batch_size = 1\n eval_config.num_steps = 1\n\n train_graph = tf.Graph()\n eval_graph = tf.Graph()\n infer_graph = tf.Graph()\n with train_graph.as_default():\n initializer = tf.random_uniform_initializer(-config.init_scale,\n config.init_scale)\n\n with tf.name_scope('Train'):\n train_input = DataInput(config=config, data=train_data, name='TrainInput')\n with tf.variable_scope('Model', reuse=None, initializer=initializer):\n m = Model(is_training=True, config=config, input_=train_input, graph=train_graph)\n tf.summary.scalar('Training Loss', m.cost)\n tf.summary.scalar('Learning rate', m.lr)\n\n latest_ckpt = tf.train.latest_checkpoint(FLAGS.save_path)\n\n with train_graph.as_default():\n sv = tf.train.Supervisor(logdir=FLAGS.save_path)\n config_proto = tf.ConfigProto(log_device_placement=False,\n allow_soft_placement=True)\n with sv.managed_session(config=config_proto) as train_sess:\n #with tf.Session(config=config_proto) as train_sess:\n train_sess.run(tf.global_variables_initializer())\n for i in 
range(config.max_max_epoch):\n lr_decay = config.lr_decay ** max(i + 1 - config.max_epoch, 0.)\n m.assign_lr(train_sess, config.learning_rate * lr_decay)\n train_perplexity = run_epoch(train_sess, m, #eval_op=m.train_op,\n verbose=True)\n print('Epoch {} Train Perplexity: {:.3f}'.format(i + 1,\n train_perplexity))\n if i % 5 == 0:\n sv.saver.save(train_sess, FLAGS.save_path,\n global_step=sv.global_step)\n\nif __name__ == '__main__':\n tf.app.run()\n" } ]
num_files: 1

repo_name: Liang-lt/Sale_Manage
repo_url: https://github.com/Liang-lt/Sale_Manage
snapshot_id: f845f54f93fbf831b540298c69773392b19df116
revision_id: 15c985013acb0877b27b1bc289b7f4c987830b7a
directory_id: b46fd0167830fb46b156591b7f7de7119b916f7f
branch_name: refs/heads/master
visit_date: 2023-05-06T14:11:06.821430
revision_date: 2021-05-27T09:37:52
committer_date: 2021-05-27T09:37:52
github_id: null
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_created_at: null
gha_updated_at: null
gha_pushed_at: null
gha_language: null
files:
[ { "alpha_fraction": 0.6000000238418579, "alphanum_fraction": 0.6112520098686218, "avg_line_length": 35.90643310546875, "blob_id": "3393dedfb8e1199bacf0b5c70de0193f3208d787", "content_id": "24126a6491a31ac8cf63efb778469b9209068305", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6602, "license_type": "no_license", "max_line_length": 136, "num_lines": 171, "path": "/Entity_Info/views.py", "repo_name": "Liang-lt/Sale_Manage", "src_encoding": "UTF-8", "text": "from django.shortcuts import render,redirect\nfrom django.core.paginator import Paginator,EmptyPage,PageNotAnInteger\n# Create your views here.\n\nfrom Entity_Info.models import User_Info, Goods_Info, Supplier_Info, Branch_Info, Storage_Info\n\n# from django.db import connection\n# from django.http import HttpResponse\n# from django.template import loader\n# from pyecharts.faker import Faker\nfrom pyecharts import options as opts\nfrom pyecharts.charts import Bar\n\ndef index(request):\n\n return render(request, 'index.html')\n\ndef logins(request):\n if request.method == 'POST':\n username = request.POST.get(\"username\")\n password = request.POST.get(\"password\")\n if not User_Info.objects.filter(user_account=username):\n msg_1 = '账号不存在!'\n return render(request, 'login.html', locals())\n if not User_Info.objects.filter(user_account=username,user_psw=password):\n msg_2 = '密码错误!'\n return render(request, 'login.html', locals())\n request.session['is_login'] = True\n request.session['username'] = username\n request.session['password'] = password\n return render(request, 'index.html', {'username': username})\n return render(request, 'login.html')\n\n\ndef regist(request):\n if request.method == 'POST':\n Account = request.POST.get(\"Account\")\n Name = request.POST.get(\"Name\")\n Tel = request.POST.get(\"Tel\")\n Password = request.POST.get(\"Password\")\n Password_r = request.POST.get(\"Password_r\")\n Branch = request.POST.get(\"Branch\")\n Section = request.POST.get(\"Section\")\n if len(Account) != 8:\n msg_1 = '账号长度应为8位'\n return render(request, 'regist.html', locals())\n elif Account.isdigit() == False :\n msg_1 = '账号应只含数字'\n return render(request, 'regist.html', locals())\n elif Account.count(' ') != 0:\n msg_1 = '账号应不含空格'\n return render(request, 'regist.html', locals())\n elif User_Info.objects.filter(user_account=Account):\n msg_1 = '账号重复'\n return render(request, 'regist.html', locals())\n\n if len(Name) >= 8:\n msg_2 = '员工名字大于8位'\n return render(request, 'regist.html', locals())\n\n if len(Tel) >= 11:\n msg_3 = '电话号码大于11位'\n return render(request, 'regist.html', locals())\n elif len(Tel) <= 7:\n msg_3 = '电话号码小于7位'\n return render(request, 'regist.html', locals())\n\n if len(Password) < 5 or len(Password) > 8:\n msg_4 = '密码长度应为6-8位字符'\n return render(request, 'regist.html', locals())\n elif Password != Password_r:\n msg_4 = '两次输入密码不一样!'\n return render(request, 'regist.html', locals())\n\n if Branch=='00':\n msg_5 = '请选择所属分店'\n return render(request, 'regist.html', locals())\n if Section=='00':\n msg_6 = '请选择所属部门'\n return render(request, 'regist.html', locals())\n User_Info.objects.create(user_account=Account,user_psw=Password,user_branch=Branch,user_dep=Section)\n\n return redirect('/login/')\n\n return render(request, 'regist.html',{'Dist': [list(item.values())[0] for item in Branch_Info.objects.values('branch_district')]})\n\n\ndef logout(request):\n if not request.session.get('is_login',None):\n return redirect('/login/')\n\n request.session.flush()\n return 
redirect('/regist/')\n\n\ndef goods_info_input(request):\n if request.method == 'POST':\n Id = request.POST.get(\"Goods_id\")\n Name = request.POST.get(\"Goods_name\")\n Category = request.POST.get(\"Goods_category\")\n Unit = request.POST.get(\"Goods_unit\")\n Price = request.POST.get(\"Goods_price\")\n\n if Goods_Info.objects.filter(goods_id=Id):\n msg_1 = '商品信息已存在'\n return render(request, 'goods_input.html', locals())\n\n Goods_Info.objects.create(goods_id=Id, goods_name=Name, goods_category=Category, goods_unit=Unit, goods_price=Price)\n\n return render(request, 'goods_input.html')\n\ndef supplier_info_input(request):\n if request.method == 'POST':\n Id = request.POST.get(\"Supplier_id\")\n Name = request.POST.get(\"Supplier_name\")\n Mail = request.POST.get(\"Supplier_mail\")\n Tel = request.POST.get(\"Supplier_tel\")\n Address = request.POST.get(\"Supplier_address\")\n\n if Supplier_Info.objects.filter(supplier_id=Id):\n msg_1 = '供应商信息已存在'\n return render(request, 'goods_new_input.html', locals())\n\n Supplier_Info.objects.create(supplier_id=Id, supplier_name=Name, supplier_mail=Mail, supplier_tel=Tel, supplier_address=Address)\n\n return render(request, 'goods_new_input.html')\n\ndef branch_info_input(request):\n if request.method == 'POST':\n Id = request.POST.get(\"Branch_id\")\n District = request.POST.get(\"Branch_district\")\n Address = request.POST.get(\"Branch_address\")\n\n if Branch_Info.objects.filter(supplier_id=Id):\n msg_1 = '分店信息已存在'\n return render(request, 'goods_new_input.html', locals())\n\n Supplier_Info.objects.create(branch_id=Id, branch_district=District, branch_address=Address)\n\n return render(request, 'goods_new_input.html')\n\n\ndef storage_info_input(request):\n if request.method == 'POST':\n Id = request.POST.get(\"Storage_id\")\n Address = request.POST.get(\"Storage_address\")\n\n if Branch_Info.objects.filter(supplier_id=Id):\n msg_1 = '仓库信息已存在'\n return render(request, 'goods_new_input.html', locals())\n\n Storage_Info.objects.create(storage_id=Id, storage_address=Address)\n\n return render(request, 'goods_new_input.html')\n\ndef show_chart(request):\n Goods = ['河马', '蟒蛇', '老虎', '大象', '兔子', '熊猫', '狮子']\n Value1 = [135, 37, 72, 150, 21, 98, 51]\n Value2 = [15, 56, 42, 10, 21, 98, 51]\n\n bar = (\n Bar()\n .add_xaxis(Goods)\n .add_yaxis(\"商家A\", Value1)\n .add_yaxis(\"商家B\", Value2)\n # .reversal_axis()\n # .set_series_opts(label_opts=opts.LabelOpts(position=\"right\"))\n .set_global_opts(title_opts=opts.TitleOpts(title=\"Bar-基本示例\", subtitle=\"我是副标题\"))\n )\n data = {'data': bar.render_embed()}\n return render(request, 'Pyechart.html', data)" }, { "alpha_fraction": 0.6357856392860413, "alphanum_fraction": 0.6584922671318054, "avg_line_length": 54.099998474121094, "blob_id": "9ac334150af314d373f66339c7bd56133baedd92", "content_id": "e6e87f45d294da1cf0ee20e6d4ec686906d8020f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1117, "license_type": "no_license", "max_line_length": 145, "num_lines": 20, "path": "/Relation_Info/views.py", "repo_name": "Liang-lt/Sale_Manage", "src_encoding": "UTF-8", "text": "from django.shortcuts import render\nfrom Relation_Info.models import Company_Branch_Delivery\n# Create your views here.\n\ndef company_branch_delivery(request):\n if request.method == 'POST':\n Inquiry = request.POST.get(\"Inquiry\")\n Time_begin = request.POST.get(\"Time_begin\", \"2011-05-03\")\n Time_end = request.POST.get(\"Time_end\", \"2100-12-31 01:01:01\")\n print(Time_begin)\n if Inquiry 
== '单号顺序':\n bill_lists=Company_Branch_Delivery.objects.filter(goods_delivery_send_time__gt = Time_begin, goods_delivery_send_time__lt = Time_end)\n # bill_lists = Company_Branch_Delivery.objects.order_by('goods_delivery_id')[:4]\n return render(request, 'Test.html', {'bill_lists': bill_lists})\n if Inquiry == '单号逆序':\n bill_lists = Company_Branch_Delivery.objects.order_by('-goods_delivery_id')[:4]\n return render(request, 'Test.html', {'bill_lists': bill_lists})\n\n bill_lists = Company_Branch_Delivery.objects.order_by('goods_delivery_send_time')[:4]\n return render(request, 'Test.html', {'bill_lists': bill_lists})" } ]
num_files: 2

repo_name: networkitproject/networkit-mirror
repo_url: https://github.com/networkitproject/networkit-mirror
snapshot_id: 3b498ed057ef6dc6accf76598f4cd1efaf220752
revision_id: 7abb7a609c67fe38f282b70848121ca8f6eb5324
directory_id: 2e5f8c07ea075f13e497696ee65f7a630191c227
branch_name: HEAD
visit_date: 2016-09-14T03:45:37.029356
revision_date: 2016-07-31T13:40:26
committer_date: 2016-07-31T13:40:26
github_id: 62657109
star_events_count: 4
fork_events_count: 0
gha_license_id: null
gha_created_at: null
gha_updated_at: null
gha_pushed_at: null
gha_language: null
files:
[ { "alpha_fraction": 0.6412371397018433, "alphanum_fraction": 0.6618556976318359, "avg_line_length": 14.15625, "blob_id": "bceb840cb248d47227dd918b62e47e41845e2585", "content_id": "3edb951dc9b97bb800be2cdd9b4463ed56df370d", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 485, "license_type": "permissive", "max_line_length": 44, "num_lines": 32, "path": "/networkit/cpp/graph/test/GraphBenchmark.h", "repo_name": "networkitproject/networkit-mirror", "src_encoding": "UTF-8", "text": "/*\n * GraphBenchmark.h\n *\n * Created on: 01.02.2013\n * Author: Christian Staudt ([email protected])\n */\n\n#ifndef NOGTEST\n\n#ifndef GRAPHBENCHMARK_H_\n#define GRAPHBENCHMARK_H_\n\n#include <gtest/gtest.h>\n\n#include \"../../auxiliary/Timer.h\"\n#include \"../../graph/Graph.h\"\n\n\nnamespace NetworKit {\n\nclass GraphBenchmark: public testing::Test {\nprotected:\n\tint64_t n;\npublic:\n\tGraphBenchmark();\n\tvirtual ~GraphBenchmark();\n};\n\n} /* namespace NetworKit */\n#endif /* GRAPHBENCHMARK_H_ */\n\n#endif /*NOGTEST */\n" }, { "alpha_fraction": 0.6982086300849915, "alphanum_fraction": 0.7330251336097717, "avg_line_length": 31.200000762939453, "blob_id": "18d8a8d46d4beb01bad1b9ede505f1a8c36da03b", "content_id": "9f8ac329dd8c8ef02edd9b1fd819252f01d028a2", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 6922, "license_type": "permissive", "max_line_length": 105, "num_lines": 215, "path": "/networkit/cpp/graph/test/GraphToolsGTest.cpp", "repo_name": "networkitproject/networkit-mirror", "src_encoding": "UTF-8", "text": "#include \"GraphToolsGTest.h\"\n#include \"../Graph.h\"\n#include \"../GraphTools.h\"\n\nnamespace NetworKit {\n\nTEST_F(GraphToolsGTest, testGetContinuousOnContinuous) {\n\tGraph G(10);\n\tauto nodeIds = GraphTools::getContinuousNodeIds(G);\n\tstd::unordered_map<node,node> reference = {{0,0},{1,1},{2,2},{3,3},{4,4},{5,5},{6,6},{7,7},{8,8},{9,9}};\n\tEXPECT_EQ(reference,nodeIds);\n}\n\nTEST_F(GraphToolsGTest, testGetContinuousOnDeletedNodes1) {\n\tGraph G(10);\n\tG.removeNode(0);\n\tG.removeNode(1);\n\tG.removeNode(2);\n\tG.removeNode(3);\n\tG.removeNode(4);\n\tauto nodeIds = GraphTools::getContinuousNodeIds(G);\n\tstd::unordered_map<node,node> reference = {{5,0},{6,1},{7,2},{8,3},{9,4}};\n\tEXPECT_EQ(reference,nodeIds);\n}\n\nTEST_F(GraphToolsGTest, testGetContinuousOnDeletedNodes2) {\n\tGraph G(10);\n\tG.removeNode(0);\n\tG.removeNode(2);\n\tG.removeNode(4);\n\tG.removeNode(6);\n\tG.removeNode(8);\n\tauto nodeIds = GraphTools::getContinuousNodeIds(G);\n\tstd::unordered_map<node,node> reference = {{1,0},{3,1},{5,2},{7,3},{9,4}};\n\tEXPECT_EQ(reference,nodeIds);\n}\n\nTEST_F(GraphToolsGTest, testGetCompactedGraphUndirectedUnweighted1) {\n\tGraph G(10,false,false);\n\tG.addEdge(0,1);\n\tG.addEdge(2,1);\n\tG.addEdge(0,3);\n\tG.addEdge(2,4);\n\tG.addEdge(3,6);\n\tG.addEdge(4,8);\n\tG.addEdge(5,9);\n\tG.addEdge(3,7);\n\tG.addEdge(5,7);\n\n\tauto nodeMap = GraphTools::getContinuousNodeIds(G);\n\tauto Gcompact = GraphTools::getCompactedGraph(G,nodeMap);\n\t\n\tEXPECT_EQ(G.numberOfNodes(),Gcompact.numberOfNodes());\n\tEXPECT_EQ(G.numberOfEdges(),Gcompact.numberOfEdges());\n\tEXPECT_EQ(G.isDirected(),Gcompact.isDirected());\n\tEXPECT_EQ(G.isWeighted(),Gcompact.isWeighted());\n\t// TODOish: find a deeper test to check if the structure of the graphs are the same, \n\t// probably compare results of some algorithms or compare each edge with a reference node id 
map.\n}\n\nTEST_F(GraphToolsGTest, testGetCompactedGraphUndirectedUnweighted2) {\n\tGraph G(10,false,false);\n\tG.removeNode(0);\n\tG.removeNode(2);\n\tG.removeNode(4);\n\tG.removeNode(6);\n\tG.removeNode(8);\n\tG.addEdge(1,3);\n\tG.addEdge(5,3);\n\tG.addEdge(7,5);\n\tG.addEdge(7,9);\n\tG.addEdge(1,9);\n\n\tauto nodeMap = GraphTools::getContinuousNodeIds(G);\n\tauto Gcompact = GraphTools::getCompactedGraph(G,nodeMap);\n\t\n\tEXPECT_NE(G.upperNodeIdBound(),Gcompact.upperNodeIdBound());\n\tEXPECT_EQ(G.numberOfNodes(),Gcompact.numberOfNodes());\n\tEXPECT_EQ(G.numberOfEdges(),Gcompact.numberOfEdges());\n\tEXPECT_EQ(G.isDirected(),Gcompact.isDirected());\n\tEXPECT_EQ(G.isWeighted(),Gcompact.isWeighted());\n\t// TODOish: find a deeper test to check if the structure of the graphs are the same, \n\t// probably compare results of some algorithms or compare each edge with a reference node id map.\n}\n\nTEST_F(GraphToolsGTest, testGetCompactedGraphUndirectedWeighted1) {\n\tGraph G(10,true,false);\n\tG.removeNode(0);\n\tG.removeNode(2);\n\tG.removeNode(4);\n\tG.removeNode(6);\n\tG.removeNode(8);\n\tG.addEdge(1,3,0.2);\n\tG.addEdge(5,3,2132.351);\n\tG.addEdge(7,5,3.14);\n\tG.addEdge(7,9,2.7);\n\tG.addEdge(1,9,0.12345);\n\n\tauto nodeMap = GraphTools::getContinuousNodeIds(G);\n\tauto Gcompact = GraphTools::getCompactedGraph(G,nodeMap);\n\t\n\tEXPECT_EQ(G.totalEdgeWeight(),Gcompact.totalEdgeWeight());\n\tEXPECT_NE(G.upperNodeIdBound(),Gcompact.upperNodeIdBound());\n\tEXPECT_EQ(G.numberOfNodes(),Gcompact.numberOfNodes());\n\tEXPECT_EQ(G.numberOfEdges(),Gcompact.numberOfEdges());\n\tEXPECT_EQ(G.isDirected(),Gcompact.isDirected());\n\tEXPECT_EQ(G.isWeighted(),Gcompact.isWeighted());\n\t// TODOish: find a deeper test to check if the structure of the graphs are the same, \n\t// probably compare results of some algorithms or compare each edge with a reference node id map.\n}\n\nTEST_F(GraphToolsGTest, testGetCompactedGraphDirectedWeighted1) {\n\tGraph G(10,true,true);\n\tG.removeNode(0);\n\tG.removeNode(2);\n\tG.removeNode(4);\n\tG.removeNode(6);\n\tG.removeNode(8);\n\tG.addEdge(1,3,0.2);\n\tG.addEdge(5,3,2132.351);\n\tG.addEdge(7,5,3.14);\n\tG.addEdge(7,9,2.7);\n\tG.addEdge(1,9,0.12345);\n\n\tauto nodeMap = GraphTools::getContinuousNodeIds(G);\n\tauto Gcompact = GraphTools::getCompactedGraph(G,nodeMap);\n\t\n\tEXPECT_EQ(G.totalEdgeWeight(),Gcompact.totalEdgeWeight());\n\tEXPECT_NE(G.upperNodeIdBound(),Gcompact.upperNodeIdBound());\n\tEXPECT_EQ(G.numberOfNodes(),Gcompact.numberOfNodes());\n\tEXPECT_EQ(G.numberOfEdges(),Gcompact.numberOfEdges());\n\tEXPECT_EQ(G.isDirected(),Gcompact.isDirected());\n\tEXPECT_EQ(G.isWeighted(),Gcompact.isWeighted());\n\t// TODOish: find a deeper test to check if the structure of the graphs are the same, \n\t// probably compare results of some algorithms or compare each edge with a reference node id map.\n}\n\nTEST_F(GraphToolsGTest, testGetCompactedGraphDirectedUnweighted1) {\n\tGraph G(10,false,true);\n\tG.removeNode(0);\n\tG.removeNode(2);\n\tG.removeNode(4);\n\tG.removeNode(6);\n\tG.removeNode(8);\n\tG.addEdge(1,3);\n\tG.addEdge(5,3);\n\tG.addEdge(7,5);\n\tG.addEdge(7,9);\n\tG.addEdge(1,9);\n\tauto nodeMap = GraphTools::getContinuousNodeIds(G);\n\tauto Gcompact = 
GraphTools::getCompactedGraph(G,nodeMap);\n\t\n\tEXPECT_EQ(G.totalEdgeWeight(),Gcompact.totalEdgeWeight());\n\tEXPECT_NE(G.upperNodeIdBound(),Gcompact.upperNodeIdBound());\n\tEXPECT_EQ(G.numberOfNodes(),Gcompact.numberOfNodes());\n\tEXPECT_EQ(G.numberOfEdges(),Gcompact.numberOfEdges());\n\tEXPECT_EQ(G.isDirected(),Gcompact.isDirected());\n\tEXPECT_EQ(G.isWeighted(),Gcompact.isWeighted());\n\t// TODOish: find a deeper test to check if the structure of the graphs are the same, \n\t// probably compare results of some algorithms or compare each edge with a reference node id map.\n}\n\nTEST_F(GraphToolsGTest, testInvertedMapping) {\n\tGraph G(10,false,true);\n\tG.removeNode(0);\n\tG.removeNode(2);\n\tG.removeNode(4);\n\tG.removeNode(6);\n\tG.removeNode(8);\n\tG.addEdge(1,3);\n\tG.addEdge(5,3);\n\tG.addEdge(7,5);\n\tG.addEdge(7,9);\n\tG.addEdge(1,9);\n\tauto nodeMap = GraphTools::getContinuousNodeIds(G);\n\tauto invertedNodeMap = GraphTools::invertContinuousNodeIds(nodeMap,G);\n\n\tEXPECT_EQ(6,invertedNodeMap.size());\n\n\tstd::vector<node> reference = {1,3,5,7,9,10};\n\tEXPECT_EQ(reference,invertedNodeMap);\n}\n\nTEST_F(GraphToolsGTest, testRestoreGraph) {\n\tGraph G(10,false,true);\n\tG.removeNode(0);\n\tG.removeNode(2);\n\tG.removeNode(4);\n\tG.removeNode(6);\n\tG.removeNode(8);\n\tG.addEdge(1,3);\n\tG.addEdge(5,3);\n\tG.addEdge(7,5);\n\tG.addEdge(7,9);\n\tG.addEdge(1,9);\n\tauto nodeMap = GraphTools::getContinuousNodeIds(G);\n\tauto invertedNodeMap = GraphTools::invertContinuousNodeIds(nodeMap,G);\n\tstd::vector<node> reference = {1,3,5,7,9,10};\n\n\n\tEXPECT_EQ(6,invertedNodeMap.size());\n\tEXPECT_EQ(reference,invertedNodeMap);\n\n\tauto Gcompact = GraphTools::getCompactedGraph(G,nodeMap);\n\tGraph Goriginal = GraphTools::restoreGraph(invertedNodeMap,Gcompact);\n\n\tEXPECT_EQ(Goriginal.totalEdgeWeight(),Gcompact.totalEdgeWeight());\n\tEXPECT_NE(Goriginal.upperNodeIdBound(),Gcompact.upperNodeIdBound());\n\tEXPECT_EQ(Goriginal.numberOfNodes(),Gcompact.numberOfNodes());\n\tEXPECT_EQ(Goriginal.numberOfEdges(),Gcompact.numberOfEdges());\n\tEXPECT_EQ(Goriginal.isDirected(),Gcompact.isDirected());\n\tEXPECT_EQ(Goriginal.isWeighted(),Gcompact.isWeighted());\n}\n\n}" }, { "alpha_fraction": 0.6634782552719116, "alphanum_fraction": 0.6747826337814331, "avg_line_length": 22, "blob_id": "7131e26b6f49b270e0d41fc9f737b76a3c21ee18", "content_id": "8f2b1b7ae56d7d3b0a22706f8f06107d11c15a1d", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1150, "license_type": "permissive", "max_line_length": 135, "num_lines": 50, "path": "/networkit/cpp/numerics/GaussSeidelRelaxation.cpp", "repo_name": "networkitproject/networkit-mirror", "src_encoding": "UTF-8", "text": "/*\n * GaussSeidelRelaxation.cpp\n *\n * Created on: 27.10.2014\n * Author: Michael Wegner ([email protected])\n */\n\n#include \"GaussSeidelRelaxation.h\"\n\nnamespace NetworKit {\n\nGaussSeidelRelaxation::GaussSeidelRelaxation(double tolerance) : tolerance(tolerance) {\n}\n\nVector GaussSeidelRelaxation::relax(const CSRMatrix &A, const Vector &b, const Vector &initialGuess, const count maxIterations) const {\n\tcount iterations = 0;\n\tVector x_old = initialGuess;\n\tVector x_new = initialGuess;\n\tif (maxIterations == 0) return initialGuess;\n\n\tcount dimension = A.numberOfColumns();\n\tVector diagonal = A.diagonal();\n\n\tdo {\n\t\tx_old = x_new;\n\n\t\tfor (index i = 0; i < dimension; ++i) {\n\t\t\tdouble sigma = 0.0;\n\t\t\tA.forNonZeroElementsInRow(i, [&](index column, double value) 
{\n\t\t\t\tif (column != i) {\n\t\t\t\t\tsigma += value * x_new[column];\n\t\t\t\t}\n\t\t\t});\n\n\t\t\tx_new[i] = (b[i] - sigma) / diagonal[i];\n\t\t}\n\n\t\titerations++;\n\t} while (iterations < maxIterations);\n\n\treturn x_new;\n}\n\nVector GaussSeidelRelaxation::relax(const CSRMatrix &A, const Vector &b, const count maxIterations) const {\n\tVector x(b.getDimension());\n\treturn relax(A, b, x, maxIterations);\n}\n\n\n} /* namespace NetworKit */\n" }, { "alpha_fraction": 0.6840026378631592, "alphanum_fraction": 0.6951941847801208, "avg_line_length": 22.734375, "blob_id": "93d2856c91b853c1bbe141282e217d4e23b0f151", "content_id": "7aea248cb037056d9eae394b9b14d94a0016a30c", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1519, "license_type": "permissive", "max_line_length": 92, "num_lines": 64, "path": "/networkit/cpp/clique/MaxClique.h", "repo_name": "networkitproject/networkit-mirror", "src_encoding": "UTF-8", "text": "/*\n * MaxClique.h\n *\n * Created on: 08.12.2014\n * Author: Henning\n */\n\n#ifndef MAXCLIQUE_H_\n#define MAXCLIQUE_H_\n\n#include \"../graph/Graph.h\"\n#include <unordered_set>\n\n\nnamespace NetworKit {\n\n\n/**\n * Exact algorithm for computing the size of the largest clique in a graph.\n * Worst-case running time is exponential, but in practice the algorithm is fairly fast.\n * Reference: Pattabiraman et al., http://arxiv.org/pdf/1411.7460.pdf\n */\nclass MaxClique {\nprotected:\n\tconst Graph& G;\n\tcount maxi;\n\tstd::unordered_set<node> bestClique;\n\n\t/**\n\t * Subroutine that goes through every relevant clique containing a certain node in\n\t * a recursive fashion and computes the size of the largest.\n\t */\n\tvoid clique(std::unordered_set<node>& U, std::unordered_set<node>& currClique, count size);\n\npublic:\n\t/**\n\t * Constructor for maximum clique algorithm.\n\t * @param[in] G Graph @a G for which algorithm should be run.\n\t * @param[in] lb Lower bound for maximum clique size.\n\t */\n\tMaxClique(const Graph& G, count lb=0);\n\n\t/**\n\t * Actual maximum clique algorithm. Determines largest clique each vertex\n\t * is contained in and returns size of largest. 
Pruning steps keep running time\n\t * acceptable in practice.\n\t * @return Size of maximum clique.\n\t */\n\tvoid run();\n\n\t/**\n\t * Get size of maximum clique.\n\t * @return Size of maximum clique\n\t */\n\tcount getMaxCliqueSize();\n\n\t/**\n\t * @return Largest clique of the graph.\n\t */\n\tstd::unordered_set<node> getMaxClique() const;\n};\n\n} /* namespace NetworKit */\n#endif /* MAXCLIQUE_H_ */\n" }, { "alpha_fraction": 0.6371191143989563, "alphanum_fraction": 0.6592797636985779, "avg_line_length": 12.884614944458008, "blob_id": "0f1e1adc06ebfa6328b34b06430d5b464e51bc0d", "content_id": "d97fb7440babfab465cfe1282836f73f75b45d13", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 361, "license_type": "permissive", "max_line_length": 41, "num_lines": 26, "path": "/networkit/cpp/global/test/GlobalGTest.h", "repo_name": "networkitproject/networkit-mirror", "src_encoding": "UTF-8", "text": "/*\n * GlobalGTest.h\n *\n * Created on: 03.06.2013\n * Author: cls\n */\n\n#ifndef NOGTEST\n\n#ifndef GLOBALGTEST_H_\n#define GLOBALGTEST_H_\n\n#include <gtest/gtest.h>\n\nnamespace NetworKit {\n\nclass GlobalGTest: public testing::Test {\npublic:\n\tGlobalGTest();\n\tvirtual ~GlobalGTest();\n};\n\n} /* namespace NetworKit */\n#endif /* GLOBALGTEST_H_ */\n\n#endif /* NOGTEST */\n" }, { "alpha_fraction": 0.7331613302230835, "alphanum_fraction": 0.743813693523407, "avg_line_length": 84.38983154296875, "blob_id": "81ba1a06c96b6a2f482e9cd2abb55a690dcf6ed0", "content_id": "50b8141871c8c94cb226bc8f29d0099cfcb9eb55", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 30228, "license_type": "permissive", "max_line_length": 327, "num_lines": 354, "path": "/Doc/doc/publications.rst", "repo_name": "networkitproject/networkit-mirror", "src_encoding": "UTF-8", "text": ".. include:: <isonum.txt>\n\n.. |br| raw:: html\n\n <br />\n\n.. |separator| raw:: html\n\n\t<div style=\"padding-top: 25px; border-bottom: 1px solid #d4d7d9;\"></div>\n\n.. _publications:\n\n============\nPublications\n============\n\nThe following is a list of publications on the basis of NetworKit. We ask you to **cite** the appropriate ones if you found NetworKit useful for your own research.\nAlso, we would appreciate it if you pointed us to your publications in which you used NetworKit and allowed us to reference them on this page.\n\nJournal Paper on NetworKit as a Software Toolkit\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n.. raw:: html\n\n <ul>\n <li>\n C. Staudt, A. Sazonovs and H. Meyerhenke: NetworKit: A Tool Suite for Large-scale Complex Network Analysis. To appear in Network Science, Cambridge University Press.\n [<a href=\"http://arxiv.org/abs/1403.3005\">arXiv</a>]\n <button type=\"button\" class=\"btn-link collapsed\" data-toggle=\"collapse\" data-target=\"#collapseDiv\"></button>\n <div id=\"collapseDiv\" class=\"collapse\">\n <b>Abstract.</b> We introduce NetworKit, an open-source software package for analyzing the structure of large complex networks. Appropriate algorithmic solutions\n are required to handle increasingly common large graph data sets containing up to billions of connections. We describe the methodology applied to develop scalable\n solutions to network analysis problems, including techniques like parallelization, heuristics for computationally expensive problems, efficient data structures,\n and modular software architecture. 
Our goal for the software is to package results of our algorithm engineering efforts and put them into the hands of domain experts.\n NetworKit is implemented as a hybrid combining the kernels written in C++ with a Python front end, enabling integration into the Python ecosystem of tested tools for\n data analysis and scientific computing. The package provides a wide range of functionality (including common and novel analytics algorithms and graph generators) and\n does so via a convenient interface. In an experimental comparison with related software, NetworKit shows the best performance on a range of typical analysis tasks.\n </div>\n </li>\n </ul>\n\n|separator|\n\n\nPublications on Algorithms Available in NetworKit\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n.. raw:: html\n\n <ul>\n <li>\n M. von Looz, M. Özdayi, S. Laue, H. Meyerhenke: Generating massive complex networks with hyperbolic geometry faster in practice. To appear at <i><a href=\"http://ieee-hpec.org/\">HPEC</a> '16</i>. [<a href=\"http://arxiv.org/abs/1606.09481\">arXiv</a>]\n <button type=\"button\" class=\"btn-link collapsed\" data-toggle=\"collapse\" data-target=\"#collapseDiv\"></button>\n <div id=\"collapseDiv\" class=\"collapse\">\n <b>Abstract.</b> Generative network models play an important role in algorithm development, scaling studies, network analysis, and realistic system benchmarks for graph data sets. The commonly used graph-based benchmark model R-MAT has some drawbacks concerning realism and the scaling behavior of network properties.\n A complex network model gaining considerable popularity builds random hyperbolic graphs, generated by distributing points within a disk in the hyperbolic plane and then adding edges between points whose hyperbolic distance is below a threshold.\n We present in this paper a fast generation algorithm for such graphs.\n Our experiments show that our new generator achieves speedup factors of 3-60 over the best previous implementation.\n One billion edges can now be generated in under one minute on a shared-memory workstation.\n Furthermore, we present a dynamic extension to model gradual network change, while preserving at each step the point position probabilities.\n </div>\n </li>\n\n <br>\n\n <li>\n M. von Looz, H. Meyerhenke: Querying Probabilistic Neighborhoods in Spatial Data Sets Efficiently. To appear at <i><a href=\"http://iwoca2016.cs.helsinki.fi/\">IWOCA 2016</a></i>. 
[<a href=\"http://arxiv.org/abs/1509.01990\">arXiv</a>]\n <button type=\"button\" class=\"btn-link collapsed\" data-toggle=\"collapse\" data-target=\"#collapseDiv\"></button>\n <div id=\"collapseDiv\" class=\"collapse\">\n <b>Abstract.</b> The probability that two spatial objects establish some kind of mutual connection often depends on their proximity.\n To formalize this concept, we define the notion of a <i>probabilistic neighborhood</i>:\n \\(\\newcommand{\\dist}{\\operatorname{dist}}\\)\n Let \\(P\\) be a set of \\(n\\) points in \\(\\mathbb{R}^d\\), \\(q \\in \\mathbb{R}^d\\) a query point, \\(\\dist\\) a distance metric, and \\(f : \\mathbb{R}^+ \\rightarrow [0,1]\\) a monotonically decreasing function.\n Then, the probabilistic neighborhood \\(N(q, f)\\) of \\(q\\) with respect to \\(f\\) is\n a random subset of \\(P\\) and each point \\(p \\in P\\) belongs to \\(N(q,f)\\) with probability \\(f(\\dist(p,q))\\).\n Possible applications include query sampling and the simulation of probabilistic spreading phenomena, as well as other scenarios where the probability of a connection between two entities decreases with their distance.\n We present a fast, sublinear-time query algorithm to sample probabilistic neighborhoods from planar point sets.\n For certain distributions of planar \\(P\\), we prove that our algorithm answers a query in \\(O((|N(q,f)| + \\sqrt{n})\\log n)\\) time with high probability.\n In experiments this yields a speedup over pairwise distance probing of at least one order of magnitude, even for rather small data sets with \\(n=10^5\\) and also for other point distributions not covered by the theoretical results.\n </div>\n </li>\n\n <br>\n\n <li>\n E. Bergamini, H. Meyerhenke: Approximating Betweenness Centrality in Fully-dynamic Networks. To appear in <i>Internet Mathematics</i>. Code to appear in NetworKit.\n <button type=\"button\" class=\"btn-link collapsed\" data-toggle=\"collapse\" data-target=\"#collapseDiv\"></button>\n <div id=\"collapseDiv\" class=\"collapse\">\n <b>Abstract.</b> Betweenness is a well-known centrality measure that ranks the nodes of a network according to their participation in shortest paths. Since an exact\n computation is prohibitive in large networks, several approximation algorithms have been proposed. Besides that, recent years have seen the publication of dynamic\n algorithms for efficient recomputation of betweenness in networks that change over time. In this paper we propose the first betweenness centrality approximation\n algorithms with a provable guarantee on the maximum approximation error for dynamic networks. Several new intermediate algorithmic results contribute to the\n respective approximation algorithms: (i) new upper bounds on the vertex diameter, (ii) the first fully-dynamic algorithm for updating an approximation of the\n vertex diameter in undirected graphs, and (iii) an algorithm with lower time complexity for updating single-source shortest paths in unweighted graphs after a batch\n of edge actions. Using approximation, our algorithms are the first to make in-memory computation of betweenness in dynamic networks with millions of edges feasible.\n Our experiments show that our algorithms can achieve substantial speedups compared to recomputation, up to several orders of magnitude. Moreover, the approximation\n accuracy is usually significantly better than the theoretical guarantee in terms of absolute error. 
More importantly, for reasonably small approximation error\n thresholds, the rank of nodes is well preserved, in particular for nodes with high betweenness.\n </div>\n </li>\n\n <br>\n\n <li>\n G. Lindner, C. L. Staudt, M. Hamann, H. Meyerhenke, D. Wagner: Structure-Preserving Sparsification Methods for Social Networks. To appear in <i>Social Network Analysis\n and Mining</i>.\n <button type=\"button\" class=\"btn-link collapsed\" data-toggle=\"collapse\" data-target=\"#collapseDiv\"></button>\n <div id=\"collapseDiv\" class=\"collapse\">\n <b>Abstract.</b> Sparsification reduces the size of networks while preserving structural and statistical properties of interest. Various sparsifying algorithms have been\n proposed in different contexts. We contribute the first systematic conceptual and experimental comparison of edge sparsification methods on a diverse set of network\n properties. It is shown that they can be understood as methods for rating edges by importance and then filtering globally or locally by these scores. We show\n that applying a local filtering technique improves the preservation of all kinds of properties. In addition, we propose a new sparsification method (Local Degree)\n which preserves edges leading to local hub nodes. All methods are evaluated on a set of social networks from Facebook, Google+, Twitter and LiveJournal with respect\n to network properties including diameter, connected components, community structure, multiple node centrality measures and the behavior of epidemic simulations.\n In order to assess the preservation of the community structure, we also include experiments on synthetically generated networks with ground truth communities.\n Experiments with our implementations of the sparsification methods (included in the open-source network analysis tool suite NetworKit) show that many network\n properties can be preserved down to about 20% of the original set of edges for sparse graphs with a reasonable density. The experimental results allow us to\n differentiate the behavior of different methods and show which method is suitable with respect to which property. While our Local Degree method is best for\n preserving connectivity and short distances, other newly introduced local variants are best for preserving the community structure.\n </div>\n </li>\n\n <br>\n\n <li>\n E. Bergamini, M. Borassi, P. Crescenzi, A. Marino, H. Meyerhenke: Computing Top-k Closeness Centrality Faster in Unweighted Graphs. In <i>Proc. SIAM Algorithm\n Engineering & Experiments</i> (ALENEX 2016). Code to appear in NetworKit.\n <button type=\"button\" class=\"btn-link collapsed\" data-toggle=\"collapse\" data-target=\"#collapseDiv\"></button>\n <div id=\"collapseDiv\" class=\"collapse\">\n <b>Abstract.</b> Centrality indices are widely used analytic measures for the importance of nodes in a network. Closeness centrality is very popular among these measures.\n For a single node v, it takes the sum of the distances of v to all other nodes into account. The currently best algorithms in practical applications for\n computing the closeness for all nodes exactly in unweighted graphs are based on breadth-first search (BFS) from every node. Thus, even for sparse graphs,\n these algorithms require quadratic running time in the worst case, which is prohibitive for large networks. <br>\n In many relevant applications, however, it is unnecessary to compute closeness values for all nodes. Instead, one requires only the k nodes with the highest\n closeness values in descending order. 
Thus, we present a new algorithm for computing this top-k ranking in unweighted graphs. Following the rationale of previous\n work, our algorithm significantly reduces the number of traversed edges. It does so by computing upper bounds on the closeness and stopping the current BFS search\n when k nodes already have higher closeness than the bounds computed for the other nodes.<br>\n In our experiments with real-world and synthetic instances of various types, one of these new bounds is good for small-world graphs with low diameter (such as\n social networks), while the other one excels for graphs with high diameter (such as road networks). Combining them yields an algorithm that is faster than the state\n of the art for top-k computations for all test instances, by a wide margin for high-diameter graphs.\n </div>\n </li>\n\n <br>\n\n <li>\n M. von Looz, R. Prutkin and H. Meyerhenke: Fast Generation of Complex Networks with Underlying Hyperbolic Geometry. In <i>Proc. 26th International Symposium on\n Algorithms and Computation</i> (ISAAC 2015). Code in NetworKit. [<a href=\"http://arxiv.org/abs/1501.03545\">arXiv</a>]\n <button type=\"button\" class=\"btn-link collapsed\" data-toggle=\"collapse\" data-target=\"#collapseDiv\"></button>\n <div id=\"collapseDiv\" class=\"collapse\">\n <b>Abstract.</b> Complex networks have become increasingly popular for modeling various real-world phenomena. Realistic generative network models are important in\n this context as they avoid privacy concerns of real data and simplify complex network research regarding data sharing, reproducibility, and scalability studies.\n Random hyperbolic graphs are a well-analyzed family of geometric graphs. Previous work provided empirical and theoretical evidence that this generative graph\n model creates networks with non-vanishing clustering and other realistic features. However, the investigated networks in previous applied work were small,\n possibly due to the quadratic running time of a previous generator.\n In this work we provide the first generation algorithm for these networks with subquadratic running time. We prove a time complexity of\n \\(\\mathcal{O}((n^{3/2} + m) \\log n)\\) with high probability for the generation process. This running time is confirmed by experimental data with our\n implementation. The acceleration stems primarily from the reduction of pairwise distance computations through a polar quadtree, which we adapt to hyperbolic\n space for this purpose. In practice we improve the running time of a previous implementation by at least two orders of magnitude this way. Networks with billions\n of edges can now be generated in a few minutes. Finally, we evaluate the largest networks of this model published so far. Our empirical analysis shows that\n important features are retained over different graph densities and degree distributions.\n </div>\n </li>\n\n <br>\n\n <li>\n E. Bergamini and H. Meyerhenke: Fully-dynamic Approximation of Betweenness Centrality. In <i>Proc. 23rd European Symp. on Algorithms</i> (ESA 2015). Code to appear\n in NetworKit. [<a href=\"http://arxiv.org/abs/1504.07091\">arXiv</a>]\n <button type=\"button\" class=\"btn-link collapsed\" data-toggle=\"collapse\" data-target=\"#collapseDiv\"></button>\n <div id=\"collapseDiv\" class=\"collapse\">\n <b>Abstract.</b> Betweenness is a well-known centrality measure that ranks the nodes of a network according to their participation in shortest paths. 
Since an\n exact computation is prohibitive in large networks, several approximation algorithms have been proposed. Besides that, recent years have seen the publication of\n dynamic algorithms for efficient recomputation of betweenness in evolving networks. In previous work we proposed the first semi-dynamic algorithms that recompute\n an approximation of betweenness in connected graphs after batches of edge insertions. In this paper we propose the first fully-dynamic approximation algorithms\n (for weighted and unweighted undirected graphs that need not be connected) with a provable guarantee on the maximum approximation error. The transfer to\n fully-dynamic and disconnected graphs implies additional algorithmic problems that could be of independent interest. In particular, we propose a new upper bound\n on the vertex diameter for weighted undirected graphs. For both weighted and unweighted graphs, we also propose the first fully-dynamic algorithms that keep\n track of this upper bound. In addition, we extend our former algorithm for semi-dynamic BFS to batches of both edge insertions and deletions. <br>\n Using approximation, our algorithms are the first to make in-memory computation of betweenness in fully-dynamic networks with millions of edges feasible.\n Our experiments show that they can achieve substantial speedups compared to recomputation, up to several orders of magnitude.\n </div>\n </li>\n\n <br>\n\n <li>\n E. Bergamini, H. Meyerhenke and C. Staudt: Approximating Betweenness Centrality in Large Evolving Networks. In <i>Proc. SIAM Algorithm Engineering & Experiments</i>\n (ALENEX 2015). [<a href=\"http://arxiv.org/abs/1409.6241\">arXiv</a>] [<a href=\"http://dx.doi.org/10.1137/1.9781611973754.12\">DOI: 10.1137/1.9781611973754.12</a>]\n <button type=\"button\" class=\"btn-link collapsed\" data-toggle=\"collapse\" data-target=\"#collapseDiv\"></button>\n <div id=\"collapseDiv\" class=\"collapse\">\n <b>Abstract.</b> Betweenness centrality ranks the importance of nodes by their participation in all shortest paths of the network. Therefore computing exact\n betweenness values is impractical in large networks. For static networks, approximation based on randomly sampled paths has been shown to be significantly faster\n in practice. However, for dynamic networks, no approximation algorithm for betweenness centrality is known that improves on static recomputation. We address this\n deficit by proposing two incremental approximation algorithms (for weighted and unweighted connected graphs) which provide a provable guarantee on the absolute\n approximation error. Processing batches of edge insertions, our algorithms yield significant speedups up to a factor of 10<sup>4</sup> compared to restarting the approximation.\n This is enabled by investing memory to store and efficiently update shortest paths. As a building block, we also propose an asymptotically faster algorithm for\n updating the SSSP problem in unweighted graphs. Our experimental study shows that our algorithms are the first to make in-memory computation of a betweenness\n ranking practical for million-edge semi-dynamic networks. Moreover, our results show that the accuracy is even better than the theoretical guarantees in terms of\n absolute errors and the rank of nodes is well preserved, in particular for those with high betweenness.\n </div>\n </li>\n\n <br>\n\n <li>\n C. Staudt, Y. Marrakchi, H. Meyerhenke: Detecting Communities Around Seed Nodes in Complex Networks. In <i>Proc. 
First International Workshop on High Performance\n Big Graph Data Management, Analysis, and Mining</i>, co-located with the <i>IEEE BigData 2014 conference</i>.\n <button type=\"button\" class=\"btn-link collapsed\" data-toggle=\"collapse\" data-target=\"#collapseDiv\"></button>\n <div id=\"collapseDiv\" class=\"collapse\">\n <b>Abstract.</b> The detection of communities (internally dense subgraphs) is a network analysis task with manifold applications. The special task of selective\n community detection is concerned with finding high-quality communities locally around seed nodes. Given the lack of conclusive experimental studies, we perform\n a systematic comparison of different previously published as well as novel methods. In particular we evaluate their performance on large complex networks,\n such as social networks. Algorithms are compared with respect to accuracy in detecting ground truth communities, community quality measures, size of communities\n and running time. We implement a generic greedy algorithm which subsumes several previous efforts in the field. Experimental evaluation of multiple objective\n functions and optimizations shows that the frequently proposed greedy approach is not adequate for large datasets. As a more scalable alternative, we propose\n selSCAN, our adaptation of a global, density-based community detection algorithm. In a novel combination with algebraic distances on graphs, query times can\n be strongly reduced through preprocessing. However, selSCAN is very sensitive to the choice of numeric parameters, limiting its practicality. The\n random-walk-based PageRankNibble emerges from the comparison as the most successful candidate.\n </div>\n </li>\n\n <br>\n\n <li>\n C. Staudt and H. Meyerhenke: Engineering Parallel Algorithms for Community Detection in Massive Networks. Accepted by <i>IEEE Transactions on Parallel and\n Distributed Systems</i> (TPDS). [<a href=\"http://arxiv.org/abs/1304.4453\">arXiv</a>]\n [<a href=\"http://dx.doi.org/10.1109/TPDS.2015.2390633\">DOI: 10.1109/TPDS.2015.2390633</a>] &#169; 2015 IEEE\n <button type=\"button\" class=\"btn-link collapsed\" data-toggle=\"collapse\" data-target=\"#collapseDiv\"></button>\n <div id=\"collapseDiv\" class=\"collapse\">\n Please note that <a href=\"https://algohub.iti.kit.edu/parco/NetworKit/NetworKit/archive/9cfacca668d8f4e4740d880877fee34beb276792.zip?subrepos=false\">NetworKit 3.5</a>\n is the last version to include an implementation of the EPP algorithm. <br><br>\n\n <b>Abstract</b> The amount of graph-structured data has recently experienced an enormous growth in many applications. To transform such data into useful\n information, fast analytics algorithms and software tools are necessary. One common graph analytics kernel is disjoint community detection (or graph clustering).\n Despite extensive research on heuristic solvers for this task, only few parallel codes exist, although parallelism will be necessary to scale to the data volume\n of real-world applications. 
We address the deficit in computing capability by a flexible and extensible community detection framework with shared-memory parallelism.\n Within this framework we design and implement efficient parallel community detection heuristics: A parallel label propagation scheme; the first large-scale\n parallelization of the well-known Louvain method, as well as an extension of the method adding refinement; and an ensemble scheme combining the above.\n In extensive experiments driven by the algorithm engineering paradigm, we identify the most successful parameters and combinations of these algorithms. We also\n compare our implementations with state-of-the-art competitors. The processing rate of our fastest algorithm often reaches 50M edges/second. We recommend the\n parallel Louvain method and our variant with refinement as both qualitatively strong and fast. Our methods are suitable for massive data sets with billions of\n edges.\n </div>\n </li>\n\n <br>\n\n <li>\n C. Staudt and H. Meyerhenke: Engineering High-Performance Community Detection Heuristics for Massive Graphs. In: <i>Proceedings of the 2013 International Conference on\n Parallel Processing</i>. [<a href=\"http://arxiv.org/abs/1304.4453\">updated and extended version on arXiv</a>,\n <a href=\"https://networkit.iti.kit.edu/data/uploads/publications/sm2013ehpcdh.bib\">bibtex</a>]\n <button type=\"button\" class=\"btn-link collapsed\" data-toggle=\"collapse\" data-target=\"#collapseDiv\"></button>\n <div id=\"collapseDiv\" class=\"collapse\">\n <b>Abstract</b> The amount of graph-structured data has recently experienced an enormous growth in many applications. To transform such data into useful information,\n high-performance analytics algorithms and software tools are necessary. One common graph analytics kernel is community detection (or graph clustering). Despite\n extensive research on heuristic solvers for this task, only few parallel codes exist, although parallelism will be necessary to scale to the data volume of\n real-world applications. We address the deficit in computing capability by a flexible and extensible community detection framework with shared-memory parallelism.\n Within this framework we design and implement efficient parallel community detection heuristics: A parallel label propagation scheme; the first large-scale\n parallelization of the well-known Louvain method, as well as an extension of the method adding refinement; and an ensemble scheme combining the strengths of the\n above. In extensive experiments driven by the algorithm engineering paradigm, we identify the most successful parameters and combinations of these algorithms.\n We also compare our implementations with state of the art competitors. The processing rate of our fastest algorithm often exceeds 10M edges/second, making it\n suitable for massive data streams. We recommend the parallel Louvain method and our variant with refinement as both qualitatively strong and relatively fast.\n Moreover, our fast ensemble algorithm yields a good tradeoff between quality and speed for community detection in very large networks.\n </div>\n </li>\n\n </ul>\n\n\n|separator|\n\n\nPublications Using NetworKit\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n.. raw:: html\n\n <ul>\n <li>\n M. Riondato, E. Upfal: ABRA: Approximating Betweenness Centrality in Static and Dynamic Graphs with Rademacher Averages.\n To appear in <i>Proc. 22nd ACM SIGKDD Conference on Knowledge Discovery and Data Mining</i> (KDD 2016), August 2016. 
[<a href=\"http://arxiv.org/abs/1602.05866\">arXiv</a>]\n <button type=\"button\" class=\"btn-link collapsed\" data-toggle=\"collapse\" data-target=\"#collapseDiv\"></button>\n <div id=\"collapseDiv\" class=\"collapse\">\n <b>Abstract</b> We present ABRA, a suite of algorithms that compute and maintain probabilistically-guaranteed, high-quality, approximations of the betweenness centrality of all nodes\n (or edges) on both static and fully dynamic graphs. Our algorithms rely on random sampling and their analysis leverages on Rademacher averages and pseudodimension, fundamental\n concepts from statistical learning theory. To our knowledge, this is the first application of these concepts to the field of graph analysis. The results of our experimental evaluation\n show that our approach is much faster than exact methods, and vastly outperforms, in both speed and number of samples, current state-of-the-art algorithms with the same quality guarantees.\n </div>\n </li>\n\n <br>\n\n <li>\n M. von Looz, M. Wolter, C. Jacob, H. Meyerhenke: Better partitions of protein graphs for subsystem quantum chemistry. To appear in <i>Proc. 15th Intl. Symp. on Experimental\n Algorithms</i> (SEA 2016), June 2016.\n <button type=\"button\" class=\"btn-link collapsed\" data-toggle=\"collapse\" data-target=\"#collapseDiv\"></button>\n <div id=\"collapseDiv\" class=\"collapse\">\n <b>Abstract</b> Determining the interaction strength between proteins and small molecules is key to analyzing their biological function. Quantum-mechanical calculations\n such as Density Functional Theory (DFT) give accurate and theoretically well-founded results. With common implementations the running time of DFT calculations increases\n quadratically with molecule size. Thus, numerous subsystem-based approaches have been developed to accelerate quantum-chemical calculations. These approaches partition\n the protein into different fragments, which are treated separately. Interactions between different fragments are approximated and introduce inaccuracies in the\n calculated interaction energies. <br>\n To minimize these inaccuracies, we represent the amino acids and their interactions as a weighted graph in order to apply graph partitioning. None of the existing graph\n partitioning work can be directly used, though, due to the unique constraints in partitioning such protein graphs. We therefore present and evaluate several algorithms,\n partially building upon established concepts, but adapted to handle the new constraints. For the special case of partitioning a protein along the main chain, we\n also present an efficient dynamic programming algorithm that yields provably optimal results. In the general scenario our algorithms usually improve the previous\n approach significantly and take at most a few seconds.\n </div>\n </li>\n\n <br>\n\n <li>\n P. Crescenzi, G. D’Angelo, L. Severini, Y. Velaj: Greedily Improving Our Own Centrality in A Network. In <i>Proc. 14th Intl. Symp. on Experimental Algorithms</i> (SEA 2015).\n LNCS 9125, pp. 43-55. Springer International Publishing, 2015.\n <button type=\"button\" class=\"btn-link collapsed\" data-toggle=\"collapse\" data-target=\"#collapseDiv\"></button>\n <div id=\"collapseDiv\" class=\"collapse\">\n <b>Abstract</b> The closeness and the betweenness centralities are two well-known measures of importance of a vertex within a given complex network. 
Having high\n closeness or betweenness centrality can have positive impact on the vertex itself: hence, in this paper we consider the problem of determining how much a vertex\n can increase its centrality by creating a limited amount of new edges incident to it. We first prove that this problem does not admit a polynomial-time approximation\n scheme (unless \\(P=NP\\)), and we then propose a simple greedy approximation algorithm (with an almost tight approximation ratio), whose performance is then tested\n on synthetic graphs and real-world networks.\n </div>\n </li>\n\n <br>\n\n <li>\n D. Hoske, D. Lukarski, H. Meyerhenke, M. Wegner: Is Nearly-linear the same in Theory and Practice? A Case Study with a Combinatorial Laplacian Solver. In <i>Proc. 14th Intl.\n Symp. on Experimental Algorithms</i> (SEA 2015). LNCS 9125, pp. 205-218. Springer International Publishing, 2015. [<a href=\"http://arxiv.org/abs/1502.07888\">arXiv</a>]\n <button type=\"button\" class=\"btn-link collapsed\" data-toggle=\"collapse\" data-target=\"#collapseDiv\"></button>\n <div id=\"collapseDiv\" class=\"collapse\">\n For the paper follow the arXiv link above. If you are interested in the implementation, see ParCo's <a href=\"http://parco.iti.kit.edu/software-en.shtml\" >software page</a>.\n </div>\n </li>\n\n </ul>\n\n\n|separator|\n\nProjects Using NetworKit\n~~~~~~~~~~~~~~~~~~~~~~~~\n\nFurther projects using NetworKit can be found `here <projects.html>`_.\n\n\n|separator|\n\nStudent Theses Using NetworKit\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\nA list of student theses based on NetworKit can be found `here <student_theses.html>`_.\n" }, { "alpha_fraction": 0.6547865867614746, "alphanum_fraction": 0.6590842008590698, "avg_line_length": 26.655736923217773, "blob_id": "dfb5ada6d4c0d0631083bd2f64ac5ef7bac78aca", "content_id": "74b3cbd6e80e619b940d27a3c6a200c49869d857", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 13496, "license_type": "permissive", "max_line_length": 180, "num_lines": 488, "path": "/networkit/cpp/algebraic/CSRMatrix.h", "repo_name": "networkitproject/networkit-mirror", "src_encoding": "UTF-8", "text": "/*\n * CSRMatrix.h\n *\n * Created on: May 6, 2015\n * Author: Michael Wegner ([email protected])\n */\n\n#ifndef CSRMATRIX_H_\n#define CSRMATRIX_H_\n\n#include <vector>\n#include \"../Globals.h\"\n#include \"Vector.h\"\n#include \"../graph/Graph.h\"\n#include \"../algebraic/SparseAccumulator.h\"\n#include \"../auxiliary/Timer.h\"\n\nnamespace NetworKit {\n\n/**\n * @ingroup algebraic\n * The CSRMatrix class represents a sparse matrix stored in CSR-Format (i.e. compressed sparse row).\n * If speed is important, use this CSRMatrix instead of the Matrix class.\n */\nclass CSRMatrix {\nprivate:\n\tstd::vector<index> rowIdx;\n\tstd::vector<index> columnIdx;\n\tstd::vector<double> nonZeros;\n\n\tcount nRows;\n\tcount nCols;\n\tbool isSorted;\n\n\tvoid quicksort(index left, index right);\n\tindex partition(index left, index right);\n\npublic:\n\t/** Represents a matrix entry s.t. 
matrix(row, column) = value */\n\tstruct Triple {\n\t\tindex row;\n\t\tindex column;\n\t\tdouble value;\n\t};\n\n\t/** Default constructor */\n\tCSRMatrix();\n\n\tCSRMatrix(const count nRows, const count nCols, const std::vector<std::pair<index, index>> &positions, const std::vector<double> &values, bool isSorted = false);\n\n\tCSRMatrix(const count nRows, const count nCols, const std::vector<Triple> &triples, bool isSorted = false);\n\n\tCSRMatrix(const count nRows, const count nCols, const std::vector<std::vector<index>> &columnIdx, const std::vector<std::vector<double>> &values, bool isSorted = false);\n\n\tCSRMatrix(const count nRows, const count nCols, const std::vector<index> &rowIdx, const std::vector<index> &columnIdx, const std::vector<double> &nonZeros, bool isSorted = false);\n\n\tCSRMatrix (const CSRMatrix &other) = default;\n\n\tCSRMatrix (CSRMatrix &&other) = default;\n\n\tvirtual ~CSRMatrix() = default;\n\n\tCSRMatrix& operator=(CSRMatrix &&other) = default;\n\n\tCSRMatrix& operator=(const CSRMatrix &other) = default;\n\n\t/**\n\t * @return Number of rows.\n\t */\n\tinline count numberOfRows() const {\n\t\treturn nRows;\n\t}\n\n\t/**\n\t * @return Number of columns.\n\t */\n\tinline count numberOfColumns() const {\n\t\treturn nCols;\n\t}\n\n\t/**\n\t * @param i The row index.\n\t * @return Number of non-zeros in row @a i.\n\t */\n\tcount nnzInRow(const index i) const;\n\n\t/**\n\t * @return Number of non-zeros in this matrix.\n\t */\n\tcount nnz() const;\n\n\t/**\n\t * @return Value at matrix position (i,j).\n\t */\n\tdouble operator()(const index i, const index j) const;\n\n\t/**\n\t * Sorts the column indices in each row for faster access.\n\t */\n\tvoid sort();\n\n\t/**\n\t * @return True if the matrix is sorted, otherwise false.\n\t */\n\tbool sorted() const;\n\n\t/**\n\t * @return Row @a i of this matrix as vector.\n\t */\n\tVector row(const index i) const;\n\n\t/**\n\t * @return Column @a j of this matrix as vector.\n\t */\n\tVector column(const index j) const;\n\n\t/**\n\t * @return The main diagonal of this matrix.\n\t */\n\tVector diagonal() const;\n\n\t/**\n\t * Adds this matrix to @a other and returns the result.\n\t * @return The sum of this matrix and @a other.\n\t */\n\tCSRMatrix operator+(const CSRMatrix &other) const;\n\n\t/**\n\t * Adds @a other to this matrix.\n\t * @return Reference to this matrix.\n\t */\n\tCSRMatrix& operator+=(const CSRMatrix &other);\n\n\t/**\n\t * Subtracts @a other from this matrix and returns the result.\n\t * @return The difference of this matrix and @a other.\n\t *\n\t */\n\tCSRMatrix operator-(const CSRMatrix &other) const;\n\n\t/**\n\t * Subtracts @a other from this matrix.\n\t * @return Reference to this matrix.\n\t */\n\tCSRMatrix& operator-=(const CSRMatrix &other);\n\n\t/**\n\t * Multiplies this matrix with a scalar specified in @a scalar and returns the result.\n\t * @return The result of multiplying this matrix with @a scalar.\n\t */\n\tCSRMatrix operator*(const double &scalar) const;\n\n\t/**\n\t * Multiplies this matrix with a scalar specified in @a scalar.\n\t * @return Reference to this matrix.\n\t */\n\tCSRMatrix& operator*=(const double &scalar);\n\n\t/**\n\t * Multiplies this matrix with @a vector and returns the result.\n\t * @return The result of multiplying this matrix with @a vector.\n\t */\n\tVector operator*(const Vector &vector) const;\n\n\t/**\n\t * Multiplies this matrix with @a other and returns the result in a new matrix.\n\t * @return The result of multiplying this matrix with @a other.\n\t 
*/\n\tCSRMatrix operator*(const CSRMatrix &other) const;\n\n\t/**\n\t * Divides this matrix by a divisor specified in @a divisor and returns the result in a new matrix.\n\t * @return The result of dividing this matrix by @a divisor.\n\t */\n\tCSRMatrix operator/(const double &divisor) const;\n\n\t/**\n\t * Divides this matrix by a divisor specified in @a divisor.\n\t * @return Reference to this matrix.\n\t */\n\tCSRMatrix& operator/=(const double &divisor);\n\n\t/**\n\t * Creates a submatrix of this matrix consisting of the rows specified in @a rows and columns specified in @a columns.\n\t * @param rows The row indices referencing the rows to include in the submatrix.\n\t * @param columns The column indices referencing the columns to include in the submatrix.\n\t * @return The submatrix of this matrix consisting of @a rows and @a columns.\n\t */\n\tCSRMatrix subMatrix(const std::vector<index> &rows, const std::vector<index> &columns) const;\n\n\t/**\n\t * Computes @a A @a binaryOp @a B on the elements of matrix @a A and matrix @a B.\n\t * @param A Sorted CSRMatrix.\n\t * @param B Sorted CSRMatrix.\n\t * @param binaryOp Function handling (double, double) -> double\n\t * @return @a A @a binaryOp @a B.\n\t * @note @a A and @a B must have the same dimensions and must be sorted.\n\t */\n\ttemplate<typename L> static CSRMatrix binaryOperator(const CSRMatrix &A, const CSRMatrix &B, L binaryOp);\n\n\t/**\n\t * Computes @a A^T * @a B.\n\t * @param A\n\t * @param B\n\t * @return @a A^T * @a B.\n\t * @note The number of rows of @a A must be equal to the number of rows of @a B.\n\t */\n\tstatic CSRMatrix mTmMultiply(const CSRMatrix &A, const CSRMatrix &B);\n\n\t/**\n\t * Computes @a A * @a B^T.\n\t * @param A\n\t * @param B\n\t * @return @a A * @a B^T.\n\t * @note The number of columns of @a A must be equal to the number of columns of @a B.\n\t */\n\tstatic CSRMatrix mmTMultiply(const CSRMatrix &A, const CSRMatrix &B);\n\n\t/**\n\t * Computes @a matrix^T * @a vector.\n\t * @param matrix\n\t * @param vector\n\t * @return @a matrix^T * @a vector.\n\t * @note The number of rows of @a matrix must be equal to the dimension of @a vector.\n\t */\n\tstatic Vector mTvMultiply(const CSRMatrix &matrix, const Vector &vector);\n\n\t/**\n\t * Compute the (weighted) Laplacian of the (weighted) @a graph.\n\t * @param graph\n\t * @return The (weighted) Laplacian.\n\t */\n\tstatic CSRMatrix graphLaplacian(const Graph &graph);\n\n\t/**\n\t * Compute the (weighted) adjacency matrix of the (weighted) @a graph.\n\t * @param graph\n\t * @return The (weighted) adjacency matrix.\n\t */\n\tstatic CSRMatrix adjacencyMatrix(const Graph &graph);\n\n\t/**\n\t * Computes a graph having the given @a laplacian.\n\t * @param laplacian\n\t * @return The graph having a Laplacian equal to @a laplacian.\n\t */\n\tstatic Graph laplacianToGraph(const CSRMatrix &laplacian);\n\n\t/**\n\t * Interprets the @a matrix as adjacency matrix of a graph. 
If @a matrix is non-symmetric, the graph will be directed.\n\t * @param matrix\n\t * @return The graph having an adjacency matrix equal to @a matrix.\n\t */\n\tstatic Graph matrixToGraph(const CSRMatrix &matrix);\n\n\t/**\n\t * Checks if @a matrix is symmetric.\n\t * @param matrix\n\t * @return True if @a matrix is symmetric, otherwise false.\n\t */\n\tstatic bool isSymmetric(const CSRMatrix &matrix);\n\n\t/**\n\t * Checks if @a matrix is symmetric diagonally dominant (SDD).\n\t * @param matrix\n\t * @return True if @a matrix is SDD, false otherwise.\n\t */\n\tstatic bool isSDD(const CSRMatrix &matrix);\n\n\t/**\n\t * Checks if @a matrix is a Laplacian matrix.\n\t * @param matrix\n\t * @return True if @a matrix is a Laplacian matrix, false otherwise.\n\t */\n\tstatic bool isLaplacian(const CSRMatrix &matrix);\n\n\t/**\n\t * Transposes this matrix and returns it.\n\t * @return The transposed matrix of this matrix.\n\t */\n\tCSRMatrix transpose() const;\n\n\t/**\n\t * Iterate over all non-zero elements of row @a row in the matrix and call handler(index column, double value)\n\t */\n\ttemplate<typename L> void forNonZeroElementsInRow(index i, L handle) const;\n\n\t/**\n\t * Iterate in parallel over all non-zero elements of row @a row in the matrix and call handler(index column, double value)\n\t */\n\ttemplate<typename L> void parallelForNonZeroElementsInRow(index i, L handle) const;\n\n\t/**\n\t * Iterate over all non-zero elements of the matrix in row order and call handler (lambda closure).\n\t */\n\ttemplate<typename L> void forNonZeroElementsInRowOrder(L handle) const;\n\n\t/**\n\t * Iterate in parallel over all rows and call handler (lambda closure) on non-zero elements of the matrix.\n\t */\n\ttemplate<typename L> void parallelForNonZeroElementsInRowOrder(L handle) const;\n\n\t/**\n\t * Iterate in parallel over all rows and call handler (lambda closure) on non-zero elements of the matrix.\n\t */\n\ttemplate<typename L> void parallelForNonZeroElementsInRowOrder(L handle);\n};\n\ntemplate<typename L> inline CSRMatrix NetworKit::CSRMatrix::binaryOperator(const CSRMatrix &A, const CSRMatrix &B, L binaryOp) {\n\tassert(A.nRows == B.nRows && A.nCols == B.nCols);\n\n\tif (!A.sorted() || !B.sorted()) throw std::runtime_error(\"The matrices must be sorted for this operation\");\n\tstd::vector<index> rowIdx(A.nRows+1);\n\tstd::vector<std::vector<index>> columns(A.nRows);\n\n\trowIdx[0] = 0;\n#pragma omp parallel for\n\tfor (index i = 0; i < A.nRows; ++i) {\n\t\tindex k = A.rowIdx[i];\n\t\tindex l = B.rowIdx[i];\n\t\twhile (k < A.rowIdx[i+1] && l < B.rowIdx[i+1]) {\n\t\t\tif (A.columnIdx[k] < B.columnIdx[l]) {\n\t\t\t\tcolumns[i].push_back(A.columnIdx[k]);\n\t\t\t\t++k;\n\t\t\t} else if (A.columnIdx[k] > B.columnIdx[l]) {\n\t\t\t\tcolumns[i].push_back(B.columnIdx[l]);\n\t\t\t\t++l;\n\t\t\t} else { // A.columnIdx[k] == B.columnIdx[l]\n\t\t\t\tcolumns[i].push_back(A.columnIdx[k]);\n\t\t\t\t++k;\n\t\t\t\t++l;\n\t\t\t}\n\t\t\t++rowIdx[i+1];\n\t\t}\n\n\t\twhile (k < A.rowIdx[i+1]) {\n\t\t\tcolumns[i].push_back(A.columnIdx[k]);\n\t\t\t++k;\n\t\t\t++rowIdx[i+1];\n\t\t}\n\n\t\twhile (l < B.rowIdx[i+1]) {\n\t\t\tcolumns[i].push_back(B.columnIdx[l]);\n\t\t\t++l;\n\t\t\t++rowIdx[i+1];\n\t\t}\n\t}\n\n\n\tfor (index i = 0; i < A.nRows; ++i) {\n\t\trowIdx[i+1] += rowIdx[i];\n\t}\n\n\tcount nnz = rowIdx[A.nRows];\n\tstd::vector<index> columnIdx(nnz);\n\tstd::vector<double> nonZeros(nnz, 0.0);\n\n#pragma omp parallel for\n\tfor (index i = 0; i < A.nRows; ++i) {\n\t\tfor (index cIdx = rowIdx[i], j = 0; cIdx < 
rowIdx[i+1]; ++cIdx, ++j) {\n\t\t\tcolumnIdx[cIdx] = columns[i][j];\n\t\t}\n\t\tcolumns[i].clear();\n\t\tcolumns[i].resize(0);\n\t\tcolumns[i].shrink_to_fit();\n\t}\n\n#pragma omp parallel for\n\tfor (index i = 0; i < A.nRows; ++i) {\n\t\tindex k = A.rowIdx[i];\n\t\tindex l = B.rowIdx[i];\n\t\tfor (index cIdx = rowIdx[i]; cIdx < rowIdx[i+1]; ++cIdx) {\n\t\t\tif (k < A.rowIdx[i+1] && columnIdx[cIdx] == A.columnIdx[k]) {\n\t\t\t\tnonZeros[cIdx] = A.nonZeros[k];\n\t\t\t\t++k;\n\t\t\t}\n\n\t\t\tif (l < B.rowIdx[i+1] && columnIdx[cIdx] == B.columnIdx[l]) {\n\t\t\t\tnonZeros[cIdx] = binaryOp(nonZeros[cIdx], B.nonZeros[l]);\n\t\t\t\t++l;\n\t\t\t}\n\t\t}\n\t}\n\n\treturn CSRMatrix(A.nRows, A.nCols, rowIdx, columnIdx, nonZeros, true);\n\n\n//\tstd::vector<int64_t> columnPointer(A.nCols, -1);\n//\tstd::vector<double> Arow(A.nCols, 0.0);\n//\tstd::vector<double> Brow(A.nCols, 0.0);\n//\tstd::vector<Triple> triples;\n//\n//\tfor (index i = 0; i < A.nRows; ++i) {\n//\t\tindex listHead = 0;\n//\t\tcount nnz = 0;\n//\n//\t\t// search for nonZeros in our own matrix\n//\t\tfor (index k = A.rowIdx[i]; k < A.rowIdx[i+1]; ++k) {\n//\t\t\tindex j = A.columnIdx[k];\n//\t\t\tArow[j] = A.nonZeros[k];\n//\n//\t\t\tcolumnPointer[j] = listHead;\n//\t\t\tlistHead = j;\n//\t\t\tnnz++;\n//\t\t}\n//\n//\t\t// search for nonZeros in the other matrix\n//\t\tfor (index k = B.rowIdx[i]; k < B.rowIdx[i+1]; ++k) {\n//\t\t\tindex j = B.columnIdx[k];\n//\t\t\tBrow[j] = B.nonZeros[k];\n//\n//\t\t\tif (columnPointer[j] == -1) { // our own matrix does not have a nonZero entry in column j\n//\t\t\t\tcolumnPointer[j] = listHead;\n//\t\t\t\tlistHead = j;\n//\t\t\t\tnnz++;\n//\t\t\t}\n//\t\t}\n//\n//\t\t// apply operator on the found nonZeros in A and B\n//\t\tfor (count k = 0; k < nnz; ++k) {\n//\t\t\tdouble value = binaryOp(Arow[listHead], Brow[listHead]);\n//\t\t\tif (value != 0.0) {\n//\t\t\t\ttriples.push_back({i, listHead, value});\n//\t\t\t}\n//\n//\t\t\tindex temp = listHead;\n//\t\t\tlistHead = columnPointer[listHead];\n//\n//\t\t\t// reset for next row\n//\t\t\tcolumnPointer[temp] = -1;\n//\t\t\tArow[temp] = 0.0;\n//\t\t\tBrow[temp] = 0.0;\n//\t\t}\n//\n//\t\tnnz = 0;\n//\t}\n//\n//\treturn CSRMatrix(A.nRows, A.nCols, triples);\n\n}\n\n} /* namespace NetworKit */\n\ntemplate<typename L>\ninline void NetworKit::CSRMatrix::forNonZeroElementsInRow(index i, L handle) const {\n\tfor (index k = rowIdx[i]; k < rowIdx[i+1]; ++k) {\n\t\thandle(columnIdx[k], nonZeros[k]);\n\t}\n}\n\ntemplate<typename L>\ninline void NetworKit::CSRMatrix::parallelForNonZeroElementsInRow(index i, L handle) const {\n#pragma omp parallel for\n\tfor (index k = rowIdx[i]; k < rowIdx[i+1]; ++k) {\n\t\thandle(columnIdx[k], nonZeros[k]);\n\t}\n}\n\ntemplate<typename L>\ninline void NetworKit::CSRMatrix::forNonZeroElementsInRowOrder(L handle) const {\n\tfor (index i = 0; i < nRows; ++i) {\n\t\tfor (index k = rowIdx[i]; k < rowIdx[i+1]; ++k) {\n\t\t\thandle(i, columnIdx[k], nonZeros[k]);\n\t\t}\n\t}\n}\n\ntemplate<typename L>\ninline void NetworKit::CSRMatrix::parallelForNonZeroElementsInRowOrder(L handle) const {\n#pragma omp parallel for\n\tfor (index i = 0; i < nRows; ++i) {\n\t\tfor (index k = rowIdx[i]; k < rowIdx[i+1]; ++k) {\n\t\t\thandle(i, columnIdx[k], nonZeros[k]);\n\t\t}\n\t}\n}\n\ntemplate<typename L>\ninline void NetworKit::CSRMatrix::parallelForNonZeroElementsInRowOrder(L handle) {\n#pragma omp parallel for\n\tfor (index i = 0; i < nRows; ++i) {\n\t\tfor (index k = rowIdx[i]; k < rowIdx[i+1]; ++k) {\n\t\t\thandle(i, columnIdx[k], 
nonZeros[k]);\n\t\t}\n\t}\n}\n\n#endif /* CSRMATRIX_H_ */\n" }, { "alpha_fraction": 0.6787264943122864, "alphanum_fraction": 0.6874095797538757, "avg_line_length": 18.742856979370117, "blob_id": "7d6a72dd906eec690b88ec44bb4303dcbff66ceb", "content_id": "6c3a71272063424f6374eaadf52e45a290c5e19c", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 691, "license_type": "permissive", "max_line_length": 47, "num_lines": 35, "path": "/networkit/cpp/linkprediction/test/DistMeasuresGTest.h", "repo_name": "networkitproject/networkit-mirror", "src_encoding": "UTF-8", "text": "/*\n * DistMeasuresGTest.h\n *\n * Created on: Jun 11, 2013\n * Author: Henning\n */\n#ifndef NOGTEST\n\n#ifndef DISTMEASURESGTEST_H_\n#define DISTMEASURESGTEST_H_\n\n#include <gtest/gtest.h>\n#include <cstdio>\n\n\n#include \"../../graph/Graph.h\"\n#include \"../../viz/PostscriptWriter.h\"\n#include \"../../io/METISGraphReader.h\"\n#include \"../../io/DibapGraphReader.h\"\n#include \"../../structures/Partition.h\"\n#include \"../../community/Modularity.h\"\n#include \"../AlgebraicDistanceIndex.h\"\n\nnamespace NetworKit {\n\nclass DistMeasuresGTest: public testing::Test {\npublic:\n\tDistMeasuresGTest();\n\tvirtual ~DistMeasuresGTest();\n};\n\n} /* namespace NetworKit */\n#endif /* DISTMEASURESGTEST_H_ */\n\n#endif /*NOGTEST */\n" }, { "alpha_fraction": 0.44920119643211365, "alphanum_fraction": 0.45389997959136963, "avg_line_length": 41.37610626220703, "blob_id": "e1ef9c8d7e5518352c8049f8e880c4c0cb7cbde5", "content_id": "51e745aa1ce46443f9e5f062a", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 9577, "license_type": "permissive", "max_line_length": 127, "num_lines": 226, "path": "/networkit/cpp/io/KONECTGraphReader.cpp", "repo_name": "networkitproject/networkit-mirror", "src_encoding": "UTF-8", "text": "/*\n * KONECTGraphReader.cpp\n * \n * Reader for the KONECT graph format, \n * based on the EdgeListReader.\n * \n * The KONECT format is described in detail in \n * http://konect.uni-koblenz.de/downloads/konect-handbook.pdf\n */\n\n#include \"KONECTGraphReader.h\"\n#include \"../auxiliary/Log.h\"\n\n#include <sstream>\n\n#include \"../auxiliary/Enforce.h\"\n\n#include <algorithm>\n\nnamespace NetworKit {\n\n    KONECTGraphReader::KONECTGraphReader(char separator, bool ignoreLoops) :\n    separator(separator), commentPrefix(\"%\"), firstNode(1), ignoreLoops(ignoreLoops) {\n    }\n\n    Graph KONECTGraphReader::read(const std::string& path) {\n        return readContinuous(path);\n    }\n    Graph KONECTGraphReader::readContinuous(const std::string& path) {\n\n        std::ifstream file(path);\n        Aux::enforceOpened(file);\n        std::string line; // the current line\n\n\n        DEBUG(\"separator: \", this->separator);\n        DEBUG(\"first node: \", this->firstNode);\n\n        // first find out the maximum node id\n        DEBUG(\"first pass\");\n    \t// first run through the file determines if the graph is directed and/or weighted and checks the consistency of the file\n\t// IF NEEDED: try to improve performance by storing edges in a vector or map during first pass.\n\tnode maxNode = 0;\n        count i = 0;\n\n        // the number of vertices / edges as specified in the input file\n        int n = -1, m = -1;\n\t// directed or weighted graph?\n        bool directed = true, weighted = false;\n        // the minimum number of values per line\n        unsigned int minValuesPerLine = 2;\n\t// attempt to detect a tab separator character \n\tbool detectSeparatorAttempt = true;\n\n\n        while (file.good()) 
{\n ++i;\n std::getline(file, line);\n // TRACE(\"read line: \" , line);\n if (line.compare(0, this->commentPrefix.length(), this->commentPrefix) == 0) {\n if (i == 1) {\n\t\t // first comment line determines if graph is directed/undirected and weighted/unweighted\n std::vector<std::string> split = Aux::StringTools::split(line, ' ');\n if (split.size() < 3) {\n std::stringstream message;\n message << \"malformed line - first line must contain graph format and weight information, in line \";\n message << i << \": \" << line;\n throw std::runtime_error(message.str());\n }\n if (split[1] == \"sym\" || split[1] == \"bip\") {\n directed = false;\n } else if (split[1] == \"asym\") {\n directed = true;\n } else {\n std::stringstream message;\n message << \"malformed line - first line must give the graph format (sym/asym/bip), in line \";\n message << i << \": \" << line;\n throw std::runtime_error(message.str());\n }\n\n if (split[2] == \"unweighted\" || split[2] == \"positive\") {\n weighted = false;\n\t\t\t// NOTE: positive means, that there can be multiple edges. currently, these will be ignored. \n // graph must only contain source and target ids\n minValuesPerLine = 2;\n } else if (split[2] == \"posweighted\" || split[2] == \"signed\" || split[2] == \"weighted\") {\n weighted = true;\n // graph must contain source and target ids and weight!\n minValuesPerLine = 3;\n } else {\n\t\t\tstd::stringstream message;\n\t\t\tmessage << \"graph types \\\"multiweighted\\\" and \\\"dynamic\\\" are not supported yet, found in line \";\n\t\t\tmessage << i << \": \" << line;\n\t\t\tthrow std::runtime_error(message.str());\n\t\t }\n } else if (i == 2) {\n\t\t // the second, optional comment line contains number of vertices / edges\n std::vector<std::string> split = Aux::StringTools::split(line, ' ');\n m = std::stoul(split[1]);\n n = std::stoul(split[2]);\n//\t\t TRACE(\"m is: \",m);\n//\t\t TRACE(\"n is: \",n);\n }\n } else if (line.length() == 0) {\n // TRACE(\"ignoring empty line\");\n } else {\n std::vector<std::string> split = Aux::StringTools::split(line, this->separator);\n split.erase(std::remove_if(split.begin(),split.end(),[](const std::string& s){return s.empty();}),split.end());\n\n\t\tif(detectSeparatorAttempt) {\n // one attempt is made to detect if, instead of a space, \n // a tab separator is used in the input file\n\t\t detectSeparatorAttempt = false;\n\t\t if(separator == ' ') {\n \tstd::vector<std::string> tabSplit = Aux::StringTools::split(line, '\\t');\n if(tabSplit.size() >= 2 && tabSplit.size() > split.size()) {\n DEBUG(\"detected tab separator.\");\n split = tabSplit;\n this->separator = '\\t';\n }\n\t\t }\n }\n\n\n // correct number of values?\n if (split.size() >= minValuesPerLine && split.size() <= 4) {\n TRACE(\"split into : \", split[0], \" and \", split[1]);\n node u = std::stoul(split[0]);\n node v = std::stoul(split[1]);\n if (!this->ignoreLoops || u != v) {\n if (u > maxNode) {\n maxNode = u;\n }\n if (v > maxNode) {\n maxNode = v;\n }\n }\n } else {\n std::stringstream message;\n message << \"malformed line (expecting \";\n message << minValuesPerLine << \"-4 values, \";\n message << split.size() << \" given) \";\n message << i << \": \";\n for (const auto& s : split) {\n message << s <<\", \";\n }\n throw std::runtime_error(message.str());\n }\n }\n }\n file.close();\n maxNode = maxNode - this->firstNode + 1;\n DEBUG(\"max. 
node id found: \", maxNode);\n\n        Graph G(maxNode, weighted, directed);\n\n        DEBUG(\"second pass\");\n\t// second pass adds the edges to the graph.\n        file.open(path);\n        i = 0; // count lines\n        while (std::getline(file, line)) {\n            ++i;\n            if (line.compare(0, this->commentPrefix.length(), this->commentPrefix) == 0) {\n                // TRACE(\"ignoring comment: \" , line);\n                // comment lines already processed in first pass\n            } else {\n                // TRACE(\"edge line: \" , line);\n                std::vector<std::string> split = Aux::StringTools::split(line, this->separator);\n                split.erase(std::remove_if(split.begin(),split.end(),[](const std::string& s){return s.empty();}),split.end());\n                // correct number of values?\n                if (split.size() >= minValuesPerLine && split.size() <= 4) {\n                    node u = std::stoul(split[0]) - this->firstNode;\n                    node v = std::stoul(split[1]) - this->firstNode;\n                    if (weighted) {\n                        count weightIdx = (split[2].size() > 0) ? 2 : 3;\n                        edgeweight weight = std::stod(split[weightIdx]);\n\n                        if (!this->ignoreLoops || u != v) {\n                            if (directed) {\n                                if (!G.hasEdge(u, v)) {\n                                    G.addEdge(u, v, weight);\n                                }\n                            } else {\n                                if (!G.hasEdge(u, v) && !G.hasEdge(v, u)) {\n                                    G.addEdge(u, v, weight);\n                                }\n                            }\n                        }\n                    } else {\n\n                        if (!this->ignoreLoops || u != v) {\n                            if (directed) {\n                                if (!G.hasEdge(u, v)) {\n                                    G.addEdge(u, v);\n                                }\n                            } else {\n                                if (!G.hasEdge(u, v) && !G.hasEdge(v, u)) {\n                                    G.addEdge(u, v);\n                                }\n                            }\n                        }\n                    }\n                } else {\n                    std::stringstream message;\n                    message << \"malformed line (expecting \";\n                    message << minValuesPerLine << \"-4 values, \";\n                    message << split.size() << \" given) \";\n                    message << i << \": \" << line;\n                    throw std::runtime_error(message.str());\n                }\n            }\n        }\n        file.close();\n        if (m != -1 && G.numberOfEdges() != (unsigned int) m) {\n            ERROR(\"KONECT file is corrupted: actual number of added edges doesn't match the specified number of edges\");\n        }\n        if (n != -1 && G.numberOfNodes() != (unsigned int) n) {\n            ERROR(\"KONECT file is corrupted: actual number of added vertices doesn't match the specified number of vertices\");\n        }\n\n        G.shrinkToFit();\n        return G;\n    }\n\n\n} /* namespace NetworKit */\n" }, { "alpha_fraction": 0.6507760286331177, "alphanum_fraction": 0.6568736433982849, "avg_line_length": 25.607669830322266, "blob_id": "c007e751bc7bdd18b708f75140309f768d3d3b88", "content_id": "74f3be1279afcb366c0961a0de33a56b0067f468", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9020, "license_type": "permissive", "max_line_length": 109, "num_lines": 339, "path": "/SConstruct", "repo_name": "networkitproject/networkit-mirror", "src_encoding": "UTF-8", "text": "import os\nimport subprocess\nimport fnmatch\nimport ConfigParser\n\nhome_path = os.environ['HOME']\n\ndef checkStd(compiler):\n\tsample = open(\"sample.cpp\", \"w\")\n\tsample.write(\"\"\"\n\t#include <iostream>\n\n\t[[deprecated(\"use the function body directly instead of wrapping it in a function.\")]]\n\tvoid helloWorld() {\n\t\tstd::cout << \"Hello world\" << std::endl;\n\t}\n\n\tint main (int argc, char *argv[]) {\n\t\thelloWorld();\n\t\treturn 0;\n\t}\"\"\")\n\tsample.close()\n\tFNULL = open(os.devnull, 'w')\n\tif subprocess.call([compiler,\"-o\",\"test_build\",\"-std=c++14\",\"sample.cpp\"],stdout=FNULL,stderr=FNULL) == 0:\n\t\tstdflag = \"c++14\"\n\telif subprocess.call([compiler,\"-o\",\"test_build\",\"-std=c++11\",\"sample.cpp\"],stdout=FNULL,stderr=FNULL) == 0:\n\t\tstdflag = \"c++11\"\n\telse:\n\t\t# possibility to print warning/error\n\t\t# assume c++11\n\t\tstdflag = \"c++11\"\n\t# clean 
up\n\tFNULL.close()\n\tos.remove(\"sample.cpp\")\n\ttry:\n\t\tos.remove(\"test_build\")\n\texcept:\n\t\tpass\n\treturn stdflag\n\n\n# SOURCE files (including executable) will be gathered here\nsrcDir = \"networkit/cpp\"\ndef getSourceFiles(target, optimize):\n\tsource = []\n\n\t# walk source directory and find ONLY .cpp files\n\tfor (dirpath, dirnames, filenames) in os.walk(srcDir):\n\t    for name in fnmatch.filter(filenames, \"*.cpp\"):\n\t\t\tsource.append(os.path.join(dirpath, name))\n\n\t# exclude files depending on target, executables will be added later\n\txpatterns = [\"*-X.cpp\"]\n\texcluded = []\n\n\t# only the target \"Tests\" requires Benchmark and GTest files\n\tif (target not in [\"Tests\"]):\n\t\t# exclude files matching following patterns\n\t\txpatterns += [\"*GTest.cpp\",\"*Benchmark.cpp\"]\n\n\tfor pattern in xpatterns:\n\t\tfor name in fnmatch.filter(source, pattern):\n\t\t\texcluded.append(name)\n\n\t#print(\"excluded source files: {0}\".format(excluded))\n\tsource = [name for name in source if name not in excluded]\n\n\t# add executable\n\tif target == \"Tests\":\n\t\tsource.append(os.path.join(srcDir, \"Unittests-X.cpp\"))\n\telif target in [\"Core\",\"Lib\"]:\n\t\tpass # no executable\n\telse:\n\t\tprint(\"Unknown target: {0}\".format(target))\n\t\tExit(1)\n\n\t# create build directory for build configuration\n\tbuildDir = \".build{0}\".format(optimize)\n\tVariantDir(buildDir, srcDir, duplicate=0)\n\n\t# modify source paths for build directory\n\tsource = [name.replace(srcDir + \"/\", buildDir + \"/\") for name in source]\n\t#print(source)\n\treturn source\n\n\nAddOption(\"--compiler\",\n\tdest=\"compiler\",\n\ttype=\"string\",\n\tnargs=1,\n\taction=\"store\",\n\thelp=\"used to pass gcc version from setup.py to SConstruct\")\n\nAddOption(\"--std\",\n\tdest=\"std\",\n\ttype=\"string\",\n\tnargs=1,\n\taction=\"store\",\n\thelp=\"used to pass std flag from setup.py to SConstruct\")\n\n# ENVIRONMENT\n\n## read environment settings from configuration file\n\nenv = Environment()\ncompiler = GetOption(\"compiler\")\nstdflag = GetOption(\"std\")\n\nif not os.path.isfile(\"build.conf\"):\n\tif not compiler == None:\n\t\t#print(\"{0} has been passed via command line\".format(compiler))\n\t\tenv[\"CC\"] = compiler\n\t\tenv[\"CXX\"] = compiler\n\telse:\n\t\tenv[\"CC\"] = os.environ['CC']\n\t\tenv[\"CXX\"] = os.environ['CXX']\n\t\tenv.Append(LIBS = [\"gtest\"])\n\t\tenv.Append(LIBPATH = [os.getenv('GTEST_LIB', \"\"), os.getenv('OPENMP_LIB', \"\")])\n\t\tenv.Append(CPPPATH = [os.getenv('GTEST_INCLUDE', \"\"), os.getenv('OPENMP_INCLUDE', \"\")])\nelse:\n\tconfPath = \"build.conf\"\n\tif not os.path.isfile(confPath):\n\t\tprint(\"The configuration file `build.conf` does not exist. 
You need to create it.\")\n\t\tprint(\"Use the file build.conf.example to create your build.conf\")\n\t\tExit(1)\n\n\tconf = ConfigParser.ConfigParser()\n\tconf.read([confPath]) # read the configuration file\n\n\t## compiler\n\tif compiler is None:\n\t\tcppComp = conf.get(\"compiler\", \"cpp\", \"gcc\")\n\telse:\n\t\tcppComp = compiler\n\tdefines = conf.get(\"compiler\", \"defines\", [])\t\t# defines are optional\n\tif defines is not []:\n\t\tdefines = defines.split(\",\")\n\n\n\t## C++14 support\n\tif stdflag is None:\n\t\ttry:\n\t\t\tstdflag = conf.get(\"compiler\", \"std14\")\n\t\texcept:\n\t\t\tpass\n\tif stdflag is None or len(stdflag) == 0:\n\t\t# do test compile\n\t\tstdflag = checkStd(cppComp)\n\t\t# and store it in the configuration\n\t\tconf.set(\"compiler\",\"std14\", stdflag)\n\n\t## includes\n\tstdInclude = conf.get(\"includes\", \"std\", \"\") # includes for the standard library - may not be needed\n\tgtestInclude = conf.get(\"includes\", \"gtest\")\n\tif conf.has_option(\"includes\", \"tbb\"):\n\t\ttbbInclude = conf.get(\"includes\", \"tbb\", \"\")\n\telse:\n\t\ttbbInclude = \"\"\n\n\t## libraries\n\tgtestLib = conf.get(\"libraries\", \"gtest\")\n\tif conf.has_option(\"libraries\", \"tbb\"):\n\t\ttbbLib = conf.get(\"libraries\", \"tbb\", \"\")\n\telse:\n\t\ttbbLib = \"\"\n\n\tenv[\"CC\"] = cppComp\n\tenv[\"CXX\"] = cppComp\n\n\tenv.Append(CPPDEFINES=defines)\n\tenv.Append(CPPPATH = [stdInclude, gtestInclude, tbbInclude])\n\tenv.Append(LIBS = [\"gtest\"])\n\tenv.Append(LIBPATH = [gtestLib, tbbLib])\n\n\twith open(confPath, \"w\") as f:\n\t\tconf.write(f)\n\nenv.Append(LINKFLAGS = [\"-std={}\".format(stdflag)])\n\n## CONFIGURATIONS\n\ncommonCFlags = [\"-c\", \"-fmessage-length=0\", \"-std=c99\", \"-fPIC\"]\ncommonCppFlags = [\"-std={}\".format(stdflag), \"-Wall\", \"-c\", \"-fmessage-length=0\", \"-fPIC\"]\n\ndebugCppFlags = [\"-O0\", \"-g3\", \"-DLOG_LEVEL=LOG_LEVEL_TRACE\"]\ndebugCFlags = [\"-O0\", \"-g3\"]\n\noptimizedCppFlags = [\"-O3\", \"-DNDEBUG\", \"-DLOG_LEVEL=LOG_LEVEL_INFO\"]\noptimizedCFlags = [\"-O3\"]\n\nprofileCppFlags = [\"-O2\", \"-DNDEBUG\", \"-g\", \"-pg\", \"-DLOG_LEVEL=LOG_LEVEL_INFO\"]\nprofileCFlags = [\"-O2\", \"-DNDEBUG\", \"-g\", \"-pg\"]\n\n\n# select configuration\n# custom command line options\nAddOption(\"--optimize\",\n dest=\"optimize\",\n type=\"string\",\n nargs=1,\n action=\"store\",\n help=\"specify the optimization level to build: D(ebug), O(ptimize), P(rofile)\")\n\nAddOption(\"--sanitize\",\n dest=\"sanitize\",\n type=\"string\",\n nargs=1,\n action=\"store\",\n help=\"switch on address sanitizer\")\n\n\ntry:\n optimize = GetOption(\"optimize\")\nexcept:\n print(\"ERROR: Missing option --optimize=<LEVEL>\")\n exit()\n\nsanitize = None\ntry:\n\tsanitize = GetOption(\"sanitize\")\nexcept:\n\tpass\n\n\n\n# create build directory for build configuration\n# modify source paths for build directory\n# moved to getSourceFiles()\n\n# append flags\n\n#commmon flags\nenv.Append(CFLAGS = commonCFlags)\nenv.Append(CPPFLAGS = commonCppFlags)\n\n# logging yes or no\nAddOption(\"--logging\",\n dest=\"logging\",\n type=\"string\",\n nargs=1,\n action=\"store\",\n help=\"enable logging: yes or no\")\n\nlogging = GetOption(\"logging\")\n\nif logging == \"no\":\n\tenv.Append(CPPDEFINES=[\"NOLOGGING\"]) # logging is enabled by default\n\tprint(\"INFO: Logging is now disabled\")\nelif (logging != \"yes\") and (logging != None):\n\tprint(\"INFO: unrecognized option --logging=%s\" % logging)\n\tprint(\"Logging is enabled by default\")\n\n# openmp yes or 
no\nAddOption(\"--openmp\",\n dest=\"openmp\",\n type=\"string\",\n nargs=1,\n action=\"store\",\n help=\"-fopenmp: yes or no\")\n\nopenmp = GetOption(\"openmp\")\n\nif (openmp == \"yes\") or (openmp == None): # with OpenMP by default\n env.Append(CPPFLAGS = [\"-fopenmp\"])\n env.Append(LINKFLAGS = [\"-fopenmp\"])\nelif (openmp == \"no\"):\n env.Append(LIBS = [\"pthread\"])\nelse:\n print(\"ERROR: unrecognized option --openmp=%s\" % openmp)\n exit()\n\n# optimize flags\nif optimize == \"Dbg\":\n env.Append(CFLAGS = debugCFlags)\n env.Append(CPPFLAGS = debugCppFlags)\nelif optimize == \"Opt\":\n env.Append(CFLAGS = optimizedCFlags)\n env.Append(CPPFLAGS = optimizedCppFlags)\nelif optimize == \"Pro\":\n\t env.Append(CFLAGS = profileCFlags)\n\t env.Append(CPPFLAGS = profileCppFlags)\nelse:\n print(\"ERROR: invalid optimize: %s\" % optimize)\n exit()\n\n# sanitize\nif sanitize:\n\tif sanitize == \"address\":\n\t\tenv.Append(CPPFLAGS = [\"-fsanitize=address\"])\n\t\tenv.Append(LINKFLAGS = [\"-fsanitize=address\"])\n\telse:\n\t\tprint(\"ERROR: invalid sanitize option\")\n\t\texit()\n\n\n# TARGET\nAddOption(\"--target\",\n dest=\"target\",\n type=\"string\",\n nargs=1,\n action=\"store\",\n help=\"select target to build\")\n\n\ntarget = GetOption(\"target\")\navailableTargets = [\"Lib\",\"Core\",\"Tests\"]\nif target in availableTargets:\n\tsource = getSourceFiles(target,optimize)\n\ttargetName = \"NetworKit-{0}-{1}\".format(target, optimize)\n\tif target in [\"Core\",\"Lib\"]:\n\t\t# do not append executable\n\t\t# env.Append(CPPDEFINES=[\"NOLOGGING\"])\n\t\tenv.Library(\"NetworKit-Core-{0}\".format(optimize), source)\n\t\tif target == \"Lib\":\n\t\t\tlibFileToLink = \"libNetworKit-Core-{0}.a\".format(optimize)\n\t\t\tlibFileTarget = \"libNetworKit.a\"\n\t\t\tif os.path.lexists(libFileTarget):\n\t\t\t\tos.remove(libFileTarget)\n\t\t\tos.symlink(libFileToLink,libFileTarget)\n\t\t\t# SCons does not support python 3 yet...\n\t\t\t#os.symlink(\"src/cpp\",\"NetworKit\",True)\n\t\t\t# to support case insensitive file systems\n\t\t\t# place the symlink for the include path in the folder include\n\t\t\tif os.path.isdir(\"include\"):\n\t\t\t\ttry:\n\t\t\t\t\tos.remove(\"include/NetworKit\")\n\t\t\t\texcept:\n\t\t\t\t\tpass\n\t\t\t\tos.rmdir(\"include\")\n\t\t\tos.mkdir(\"include\")\n\t\t\tos.chdir(\"include\")\n\t\t\tsubprocess.call([\"ln\",\"-s\",\"../networkit/cpp\",\"NetworKit\"])\n\t\t\tos.chdir(\"../\")\n\n\telse:\n\t\tenv.Program(targetName, source)\nelse:\n\tprint(\"ERROR: unknown target: {0}\".format(target))\n\texit(1)\n" }, { "alpha_fraction": 0.7216035723686218, "alphanum_fraction": 0.730512261390686, "avg_line_length": 22.63157844543457, "blob_id": "41475be9ca7d96c999c9e78e0d0ece24facb8c12", "content_id": "5a48e580fa8da38315e559fbef85d2d5c8523c1f", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 898, "license_type": "permissive", "max_line_length": 106, "num_lines": 38, "path": "/networkit/cpp/numerics/LAMG/Level/LevelElimination.h", "repo_name": "networkitproject/networkit-mirror", "src_encoding": "UTF-8", "text": "/*\n * LevelElimination.h\n *\n * Created on: 10.01.2015\n * Author: Michael\n */\n\n#ifndef LEVELELIMINATION_H_\n#define LEVELELIMINATION_H_\n\n#include \"Level.h\"\n#include \"EliminationStage.h\"\n\nnamespace NetworKit {\n\n/**\n * @ingroup numerics\n */\nclass LevelElimination : public Level {\nprivate:\n\tstd::vector<EliminationStage> coarseningStages;\n\tstd::vector<index> cIndexFine;\n\n\tvoid 
subVectorExtract(Vector &subVector, const Vector &vector, const std::vector<index> &elements) const;\n\npublic:\n\tLevelElimination(const CSRMatrix &A, const std::vector<EliminationStage> &coarseningStages);\n\n\tvoid coarseType(const Vector &xf, Vector &xc) const;\n\tvoid restrict(const Vector &bf, Vector &bc, std::vector<Vector> &bStages) const;\n\tvoid interpolate(const Vector &xc, Vector &xf, const std::vector<Vector> &bStages) const;\n\n\tstatic count lvl;\n};\n\n} /* namespace NetworKit */\n\n#endif /* LEVELELIMINATION_H_ */\n" }, { "alpha_fraction": 0.7220394611358643, "alphanum_fraction": 0.7351973652839661, "avg_line_length": 18.612903594970703, "blob_id": "557d63a62f15abec11cbc35b75d1bbba2d6e9470", "content_id": "435dfcb962f0144374e206a627a82e67d11d6b05", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 608, "license_type": "permissive", "max_line_length": 61, "num_lines": 31, "path": "/networkit/cpp/algebraic/test/NormalizedLaplacianMatrixGTest.h", "repo_name": "networkitproject/networkit-mirror", "src_encoding": "UTF-8", "text": "/*\n * NormalizedLaplacianMatrixGTest.h\n *\n * Created on: 25.03.2014\n * Author: Michael Wegner ([email protected])\n */\n\n#ifndef NOGTEST\n\n#ifndef NORMALIZEDLAPLACIANMATRIXGTEST_H_\n#define NORMALIZEDLAPLACIANMATRIXGTEST_H_\n\n#include \"gtest/gtest.h\"\n#include \"../NormalizedLaplacianMatrix.h\"\n#include \"../../graph/Graph.h\"\n#include \"../../io/METISGraphReader.h\"\n\nnamespace NetworKit {\n\nclass NormalizedLaplacianMatrixGTest : public testing::Test {\npublic:\n\tNormalizedLaplacianMatrixGTest();\n\tvirtual ~NormalizedLaplacianMatrixGTest();\n};\n\n\n} /* namespace NetworKit */\n\n#endif /* NORMALIZEDLAPLACIANMATRIXGTEST_H_ */\n\n#endif\n" }, { "alpha_fraction": 0.6733668446540833, "alphanum_fraction": 0.6884422302246094, "avg_line_length": 14.307692527770996, "blob_id": "fcf6bed562d3ec683d71627f0f0ce2a9cab2c979", "content_id": "35d6c8b90492dc83ad302c1cdbe625ce6ad2986c", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 398, "license_type": "permissive", "max_line_length": 39, "num_lines": 26, "path": "/networkit/cpp/viz/test/VizGTest.h", "repo_name": "networkitproject/networkit-mirror", "src_encoding": "UTF-8", "text": "/*\n * PostscriptWriterGTest.h\n *\n * Created on: Apr 10, 2013\n * Author: Henning\n */\n\n#ifndef NOGTEST\n\n#ifndef POSTSCRIPTWRITERGTEST_H_\n#define POSTSCRIPTWRITERGTEST_H_\n\n#include <gtest/gtest.h>\n\nnamespace NetworKit {\n\nclass VizGTest : public testing::Test {\npublic:\n\tVizGTest();\n\tvirtual ~VizGTest();\n};\n\n} /* namespace NetworKit */\n#endif /* POSTSCRIPTWRITERGTEST_H_ */\n\n#endif /*NOGTEST */\n" }, { "alpha_fraction": 0.704680860042572, "alphanum_fraction": 0.7127659320831299, "avg_line_length": 28.012346267700195, "blob_id": "4f771a87785da1a567253e59ba192f09e61b8b3e", "content_id": "7e3e86974b0b623844e8ab3be4f8995070cb7016", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 2350, "license_type": "permissive", "max_line_length": 108, "num_lines": 81, "path": "/networkit/cpp/viz/MultilevelLayouter.cpp", "repo_name": "networkitproject/networkit-mirror", "src_encoding": "UTF-8", "text": "/*\n * MultilevelLayouter.cpp\n *\n * Created on: 27.01.2014\n * Author: Henning\n */\n\n#include \"MultilevelLayouter.h\"\n#include \"FruchtermanReingold.h\"\n#include \"MaxentStress.h\"\n#include \"../community/PLP.h\"\n#include 
\"../io/METISGraphWriter.h\"\n#include \"../community/EdgeCut.h\"\n#include \"../coarsening/ParallelPartitionCoarsening.h\"\n\nnamespace NetworKit {\n\nconst count MultilevelLayouter::N_THRSH = 15;\n\nMultilevelLayouter::MultilevelLayouter(Point<float> bottomLeft, Point<float> topRight, bool useGivenLayout):\n\t\tLayouter(bottomLeft, topRight, useGivenLayout)\n{\n\n}\n\nvoid MultilevelLayouter::prolongCoordinates(Graph& Gcon, Graph& G) {\n\n}\n\nvoid MultilevelLayouter::draw(Graph& G) {\n\tdrawInternal(G, 0);\n}\n\nvoid MultilevelLayouter::drawInternal(Graph& G, count level) {\n\tcount n = G.numberOfNodes();\n\n\tif (n <= N_THRSH) {\n\t\t// non-recursive part: call drawing routine directly on the coarsest graph\n\t\tMETISGraphWriter gWriter;\n\t\tgWriter.write(G, true, \"output/test-multi-coarsest.graph\");\n\t\tDEBUG(\"initial layout by FR, G's size: \", G.numberOfNodes());\n\t\tFruchtermanReingold initLayouter(bottomLeft, topRight, false);\n\t\tinitLayouter.draw(G);\n\t\tPostscriptWriter writer;\n\t\twriter.write(G, \"output/test-multi-coarsest-FR.eps\");\n\t\tMaxentStress layouter(bottomLeft, topRight, true);\n\t\tlayouter.draw(G);\n\t\twriter.write(G, \"output/test-multi-coarsest-ME.eps\");\n\t}\n\telse {\n\t\t// compute clustering\n\t\tPLP clusterer(G);\n\t\tclusterer.run();\n\t\tPartition clustering = clusterer.getPartition();\n\t\tEdgeCut ec;\n\t\tINFO(\"Clustering: #clusters: \", clustering.numberOfSubsets(), \"; cut: \", ec.getQuality(clustering, G));\n\n\t\t// coarsen by clustering\n\t\tParallelPartitionCoarsening contracter(G, clustering);\n\t\tcontracter.run();\n\t\tGraph Gcon = contracter.getCoarseGraph();\n\t\tstd::vector<node> mapping = contracter.getFineToCoarseNodeMapping();\n\n\t\t// make recursive call\n\t\tdrawInternal(Gcon, level + 1);\n\n\t\t// apply recursive solution to current graph\n\t\tG.initCoordinates();\n\t\tG.forNodes([&](node v) {\n\t\t\tG.setCoordinate(v, Gcon.getCoordinate(mapping[v]));\n//\t\t\tTRACE(\"coordinate of \", v, \": \", G.getCoordinate(v, 0), \" / \", G.getCoordinate(v, 1));\n\t\t});\n\t\tDEBUG(\"local refinement of graph of size \", n);\n\n\t\t// run drawing code on current graph\n\t\tFruchtermanReingold layouter(bottomLeft, topRight, true); //, 50 * (level + 1), 0.1); // TODO: externalize\n\t\tlayouter.draw(G);\n\t}\n}\n\n} /* namespace NetworKit */\n" }, { "alpha_fraction": 0.695338785648346, "alphanum_fraction": 0.6991831064224243, "avg_line_length": 27.90277862548828, "blob_id": "31245676a0ea7a329f2d982c8c11ca57e9dad2de", "content_id": "baff3d97b865cefc2567f35687c624a8417da0da", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 2081, "license_type": "permissive", "max_line_length": 95, "num_lines": 72, "path": "/networkit/cpp/graph/IncompleteDijkstra.h", "repo_name": "networkitproject/networkit-mirror", "src_encoding": "UTF-8", "text": "/*\n * IncompleteDijkstra.h\n *\n * Created on: 15.07.2014\n * Author: dhoske\n */\n\n#ifndef INCOMPLETEDIJKSTRA_H_\n#define INCOMPLETEDIJKSTRA_H_\n\n#include <unordered_map>\n#include <unordered_set>\n#include <vector>\n#include <utility>\n\n#include \"Graph.h\"\n#include \"IncompleteSSSP.h\"\n#include \"../auxiliary/PrioQueue.h\"\n\nnamespace NetworKit {\n\n/**\n * @ingroup graph\n * Implementation of @a IncompleteSSSP using a normal\n * Dijkstra with binary heaps.\n */\nclass IncompleteDijkstra : public IncompleteSSSP {\npublic:\n  /**\n   * Creates an IncompleteDijkstra instance from the sources in\n   * @a sources (they act like a super source) in the graph @a G.\n   * The edges in @a G must have nonnegative weight and @a G should\n   * not be null.\n   *\n   * We also consider the nodes in @a explored to not exist\n   * if @a explored is not null.\n   *\n   * @warning We do not copy @a G or @a explored, but store a\n   * non-owning pointer to them. Otherwise IncompleteDijkstra would not\n   * be more efficient than normal Dijkstra. Thus, @a G and @a explored\n   * must exist at least as long as this IncompleteDijkstra instance.\n   *\n   * @todo This is somewhat ugly, but we do not want to introduce a\n   * std::shared_ptr<> since @a G and @a explored could well\n   * be stack allocated.\n   */\n  IncompleteDijkstra(const Graph* G, const std::vector<node>& sources,\n                     const std::unordered_set<node>* explored = nullptr);\n\n  virtual bool hasNext() override;\n  virtual std::pair<node, edgeweight> next() override;\n\nprivate:\n  // discard duplicate elements in pq\n  void discardDuplicates();\n\n  // Stored reference to outside data structures\n  const Graph* G;\n  const std::unordered_set<node>* explored;\n\n  // distances aren't stored in a vector because initialising it may be too expensive\n  std::unordered_map<node, edgeweight> dists;\n  // TODO: Fix Aux::PrioQueue to work with arbitrary values.\n  // and use it instead.\n  using PrioValue = std::pair<edgeweight, node>;\n  using Prio = std::priority_queue<PrioValue, std::vector<PrioValue>, std::greater<PrioValue>>;\n  Prio pq;\n};\n\n}\n\n#endif\n" }, { "alpha_fraction": 0.6827477812767029, "alphanum_fraction": 0.6858940720558167, "avg_line_length": 20.956043243408203, "blob_id": "422d263498136fae1bfdc37b920a7fd45209fd9f", "content_id": "6371102c368d8f50048df5a75cc216ef22ad9620", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1907, "license_type": "permissive", "max_line_length": 91, "num_lines": 91, "path": "/networkit/cpp/components/ConnectedComponents.h", "repo_name": "networkitproject/networkit-mirror", "src_encoding": "UTF-8", "text": "/*\n * ConnectedComponents.h\n *\n * Created on: Dec 16, 2013\n * Author: cls\n */\n\n#ifndef CONNECTEDCOMPONENTS_H_\n#define CONNECTEDCOMPONENTS_H_\n\n#include \"../graph/Graph.h\"\n#include \"../graph/BFS.h\"\n#include \"../structures/Partition.h\"\n#include \"../base/Algorithm.h\"\n#include <unordered_set>\n\nnamespace NetworKit {\n\n/**\n * @ingroup components\n * Determines the connected components of an undirected graph.\n */\nclass ConnectedComponents : public Algorithm {\npublic:\n\t/**\n\t * Create ConnectedComponents class for Graph @a G.\n\t *\n\t * @param G The graph.\n\t */\n\tConnectedComponents(const Graph& G);\n\n\t/**\n\t * This method determines the connected components for the graph given in the constructor.\n\t */\n\tvoid run();\n\n\t/**\n\t * Get the number of connected components.\n\t *\n\t * @return The number of connected components.\n\t */\n\tcount numberOfComponents();\n\n\t/**\n\t * Get the component in which node @a u is situated.\n\t *\n\t * @param[in]\tu\tThe node whose component is asked for.\n\t */\n\tcount componentOfNode(node u);\n\n\n\t/**\n\t * Get a Partition that represents the components.\n\t *\n\t * @return A partition representing the found components.\n\t */\n\tPartition getPartition();\n\n    /**\n     * Return the map from component to size\n     */\n    std::map<index, count> getComponentSizes();\n\n    /**\n     * @return Vector of components, each stored as (unordered) set of nodes.\n     */\n    std::vector<std::vector<node> > getComponents();\n\n\nprivate:\n\tconst Graph& G;\n\tPartition component;\n\tcount numComponents;\n\tbool 
hasRun;\n};\n\ninline count ConnectedComponents::componentOfNode(node u) {\n\tassert (component[u] != none);\n\tif (!hasRun) throw std::runtime_error(\"run method has not been called\");\n\treturn component[u];\n}\n\ninline count ConnectedComponents::numberOfComponents() {\n\tif (!hasRun) throw std::runtime_error(\"run method has not been called\");\n\treturn this->numComponents;\n}\n\n}\n\n\n#endif /* CONNECTEDCOMPONENTS_H_ */\n" }, { "alpha_fraction": 0.6725510358810425, "alphanum_fraction": 0.677743136882782, "avg_line_length": 25.027027130126953, "blob_id": "b862c6b5c17d1479e4168c76043c70a201abed5c", "content_id": "fa69d2395deb6b47cfdb23ff8313c3d55d85d59c", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 2889, "license_type": "permissive", "max_line_length": 82, "num_lines": 111, "path": "/networkit/cpp/auxiliary/PrioQueueForInts.h", "repo_name": "networkitproject/networkit-mirror", "src_encoding": "UTF-8", "text": "/*\n * BucketPQ.h\n *\n * Created on: 26.06.2015\n * Author: Henning\n */\n\n#ifndef BUCKETPQ_H_\n#define BUCKETPQ_H_\n\n#include \"../auxiliary/Log.h\"\n#include <list>\n#include <limits>\n#include \"../Globals.h\"\n\nnamespace Aux {\n\ntypedef NetworKit::index index;\ntypedef NetworKit::count count;\ntypedef std::list<index> Bucket;\nconstexpr index none = NetworKit::none;\n\n/**\n * Addressable priority queue for elements in the range [0,n) and\n * integer priorities in the range [0, maxPrio].\n * Amortized constant running time for each operation.\n */\nclass PrioQueueForInts {\nprivate:\n\tstd::vector<Bucket> buckets;\t\t\t// the actual buckets\n\tstd::vector<Bucket::iterator> nodePtr;\t// keeps track of node positions\n\tstd::vector<index> myBucket;\t\t\t// keeps track of current bucket = priority\n\tunsigned int minNotEmpty;\t\t\t\t// current min priority\n\tint maxNotEmpty;\t\t\t\t\t\t// current max priority\n\tindex maxPrio;\t\t\t\t\t\t\t// maximum admissible priority\n\tcount numElems;\t\t\t\t\t\t\t// number of elements stored\n\n\t/**\n\t * Insert element @a elem with priority @a prio.\n\t * @param[in] elem Element to be inserted, must be in range [0, n).\n\t * @param[in] prio Priority of element to be inserted, must be in range\n\t * [0, maxPrio].\n\t */\n\tvoid insert(index elem, index prio);\n\npublic:\n\t/**\n\t * Constructor that initializes the PQ with the full batch of entries.\n\t * @param[in] prios Contains the batch of n entries, where prios[i]\n\t * represents the key-value pair (i, prios[i]). Priorities must be in\n\t * range [0, maxPrio] or none (the latter means that the element does\n\t * not exist).\n\t * @param[in] maxPrio Maximum priority value.\n\t */\n\tPrioQueueForInts(std::vector<index>& prios, index maxPrio);\n\n\t/**\n\t * Destructor.\n\t */\n\t~PrioQueueForInts() = default;\n\n\t/**\n\t * Remove element with key @a key from PQ.\n\t * @param[in] elem Element to be removed.\n\t */\n\tvoid remove(index elem);\n\n\t/**\n\t * Changes priority of element @a elem to priority @a prio.\n\t * @param[in] elem Element whose priority is changed.\n\t * @param[in] prio New priority, must be in range [0, maxPrio].\n\t */\n\tvoid changePrio(index elem, index prio);\n\n\t/**\n\t * @return Element with minimum priority.\n\t */\n\tindex extractMin();\n\n\t/**\n\t * @return Element with maximum priority.\n\t */\n\tindex extractMax();\n\n\t/**\n\t * @return Arbitrary element with priority @a prio. 
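For example (a hypothetical\n\t * sketch, not taken from the original documentation): with prios = {0, 2, 1}\n\t * and maxPrio = 2, extractAt(2) yields element 1 and extractAt(1) yields\n\t * element 2.\n\t * 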
Returns none\n\t * if no such element exists.\n\t * @param[in] Priority for which a corresponding element shall be returned,\n\t * must be in range [0, maxPrio].\n\t */\n\tindex extractAt(index prio);\n\n\t/**\n\t * @return Priority of elem @a elem.\n\t * @param[in] Element whose priority shall be returned.\n\t */\n\tindex priority(index elem);\n\n\t/**\n\t * @return True if priority queue does not contain any elements, otherwise false.\n\t */\n\tbool empty() const;\n\n\t/**\n\t * @return Number of elements contained in priority queue.\n\t */\n\tcount size() const;\n};\n\n} /* namespace Aux */\n#endif /* BUCKETPQ_H_ */\n" }, { "alpha_fraction": 0.6233684420585632, "alphanum_fraction": 0.6543157696723938, "avg_line_length": 23.869110107421875, "blob_id": "49a5875341be686c7fc63aebbcdd095491ac0d97", "content_id": "737d33d0e5499125fe94eeb7cee581fc934ae43f", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 4750, "license_type": "permissive", "max_line_length": 143, "num_lines": 191, "path": "/networkit/cpp/components/test/ConnectedComponentsGTest.cpp", "repo_name": "networkitproject/networkit-mirror", "src_encoding": "UTF-8", "text": "/*\n * ConnectedComponentsGTest.cpp\n *\n * Created on: Sep 16, 2013\n * Author: Maximilian Vogel\n */\n#ifndef NOGTEST\n\n#include \"ConnectedComponentsGTest.h\"\n\n#include \"../ConnectedComponents.h\"\n#include \"../ParallelConnectedComponents.h\"\n#include \"../StronglyConnectedComponents.h\"\n\n#include \"../../distance/Diameter.h\"\n#include \"../../io/METISGraphReader.h\"\n#include \"../../generators/HavelHakimiGenerator.h\"\n#include \"../../auxiliary/Log.h\"\n#include \"../../generators/DorogovtsevMendesGenerator.h\"\n\nnamespace NetworKit {\n\n\n TEST_F(ConnectedComponentsGTest, testConnectedComponentsTiny) {\n \t// construct graph\n \tGraph g;\n \tfor (count i = 0; i < 20; i++) {\n \t\tg.addNode();\n \t}\n \tg.addEdge(0,1,0);\n \tg.addEdge(1,2,0);\n \tg.addEdge(2,4,0);\n \tg.addEdge(4,8,0);\n \tg.addEdge(8,16,0);\n \tg.addEdge(16,19,0);\n\n \tg.addEdge(3,5,0);\n \tg.addEdge(5,6,0);\n \tg.addEdge(6,7,0);\n \tg.addEdge(7,9,0);\n\n \tg.addEdge(10,11,0);\n \tg.addEdge(10,18,0);\n \tg.addEdge(10,12,0);\n \tg.addEdge(18,17,0);\n\n \tg.addEdge(13,14,0);\n\n \t// initialize ConnectedComponents\n \tConnectedComponents ccs(g);\n \tccs.run();\n\n \t// check result\n \tEXPECT_EQ(5, ccs.numberOfComponents());\n \tEXPECT_TRUE(ccs.componentOfNode(0) == ccs.componentOfNode(19));\n \tEXPECT_TRUE(ccs.componentOfNode(3) == ccs.componentOfNode(7));\n }\n\n\nTEST_F(ConnectedComponentsGTest, testConnectedComponents) {\n\t// construct graph\n\tMETISGraphReader reader;\n\tGraph G = reader.read(\"input/astro-ph.graph\");\n\tConnectedComponents cc(G);\n\tcc.run();\n\tDEBUG(\"Number of components: \", cc.numberOfComponents());\n\tEXPECT_EQ(1029u, cc.numberOfComponents());\n}\n\nTEST_F(ConnectedComponentsGTest, testParallelConnectedComponents) {\n\tMETISGraphReader reader;\n\tstd::vector<std::string> graphs = {\"astro-ph\", \"PGPgiantcompo\",\n\t\t\t\"caidaRouterLevel\", \"celegans_metabolic\", \"hep-th\", \"jazz\"};\n\n\tfor (auto graphName: graphs) {\n\t\tGraph G = reader.read(\"input/\" + graphName + \".graph\");\n\t\tParallelConnectedComponents cc(G);\n\t\tcc.runSequential();\n\t\tcount seqNum = cc.numberOfComponents();\n\t\tcc.run();\n\t\tcount parNum = cc.numberOfComponents();\n\t\tDEBUG(\"Number of components: \", seqNum);\n\t\tEXPECT_EQ(seqNum, parNum);\n\t}\n}\n\nTEST_F(ConnectedComponentsGTest, 
testParallelConnectedComponentsWithDeletedNodes) {\n Graph G(100);\n G.forNodePairs([&](node u, node v){\n G.addEdge(u,v);\n });\n\n\n\t{\n\t\tParallelConnectedComponents cc(G);\n\t\tcc.run();\n\t\tEXPECT_EQ(1, cc.numberOfComponents()) << \"The complete graph has just one connected component\";\n\t}\n\n\tfor (node u = 0; u < 10; ++u) {\n\t\tG.forNeighborsOf(u, [&](node v) {\n\t\t\tG.removeEdge(u, v);\n\t\t});\n\t\tG.removeNode(u);\n\t}\n\n\t{\n\t\tParallelConnectedComponents cc(G);\n\t\tcc.run();\n\t\tEXPECT_EQ(1, cc.numberOfComponents()) << \"The complete graph with 10 nodes removed has still just one connected component (run())\";\n\t\tcc.runSequential();\n\t\tEXPECT_EQ(1, cc.numberOfComponents()) << \"The complete graph with 10 nodes removed has still just one connected component (runSequential())\";\n\t}\n\n}\n\nTEST_F(ConnectedComponentsGTest, benchConnectedComponents) {\n\t// construct graph\n\tMETISGraphReader reader;\n\tGraph G = reader.read(\"input/coAuthorsDBLP.graph\");\n\tConnectedComponents cc(G);\n\tcc.run();\n\tDEBUG(\"Number of components: \", cc.numberOfComponents());\n\tEXPECT_EQ(1u, cc.numberOfComponents());\n}\n\n\nTEST_F(ConnectedComponentsGTest, testStronglyConnectedComponents) {\n\n auto comparePartitions = [](const Partition& p1, const Partition& p2) {\n std::vector<index> partitionIdMap(p1.upperBound(), none);\n ASSERT_EQ(p1.numberOfElements(), p2.numberOfElements());\n ASSERT_EQ(p1.numberOfSubsets(), p2.numberOfSubsets());\n\n p1.forEntries([&](node v, index p) {\n if (partitionIdMap[p] == none) {\n partitionIdMap[p] = p2.subsetOf(v);\n }\n index p_mapped = partitionIdMap[p];\n ASSERT_EQ(p_mapped, p);\n });\n };\n\n\n count n = 8;\n count m = 14;\n Graph G(n, false, true);\n\n G.addEdge(0, 4);\n G.addEdge(1, 0);\n G.addEdge(2, 1);\n G.addEdge(2, 3);\n G.addEdge(3, 2);\n G.addEdge(4, 1);\n G.addEdge(5, 1);\n G.addEdge(5, 4);\n G.addEdge(5, 6);\n G.addEdge(6, 2);\n G.addEdge(6, 5);\n G.addEdge(7, 3);\n G.addEdge(7, 6);\n G.addEdge(7, 7);\n\n ASSERT_EQ(n, G.numberOfNodes());\n ASSERT_EQ(m, G.numberOfEdges());\n\n count z = G.upperNodeIdBound();\n Partition p_expected(z);\n p_expected.allToSingletons();\n p_expected[0] = 0;\n p_expected[1] = 0;\n p_expected[2] = 1;\n p_expected[3] = 1;\n p_expected[4] = 0;\n p_expected[5] = 2;\n p_expected[6] = 2;\n p_expected[7] = 3;\n p_expected.compact();\n\n StronglyConnectedComponents scc(G);\n scc.run();\n Partition p_actual = scc.getPartition();\n p_actual.compact();\n\n comparePartitions(p_expected, p_actual);\n}\n\n\n} /* namespace NetworKit */\n\n#endif /*NOGTEST */\n" }, { "alpha_fraction": 0.5421040654182434, "alphanum_fraction": 0.5735983848571777, "avg_line_length": 16.48828125, "blob_id": "c1c4bdeb300d14023774fe1d687c7f9311251e00", "content_id": "5964650f4abe0716c6f7828dd30ce4a6940bb395", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 4477, "license_type": "permissive", "max_line_length": 69, "num_lines": 256, "path": "/networkit/cpp/test/BasicsBenchmark.cpp", "repo_name": "networkitproject/networkit-mirror", "src_encoding": "UTF-8", "text": "/*\n * BasicsBenchmark.cpp\n *\n * Created on: 01.02.2013\n * Author: Christian Staudt ([email protected])\n */\n\n#ifndef NOGTEST\n\n#include \"BasicsBenchmark.h\"\n\nnamespace NetworKit {\n\nTEST_F(BasicsBenchmark, sequentialSum) {\n\tAux::Timer runtime;\n\n\tint64_t n = 1e+9;\n\tdouble sum = 0.0;\n\truntime.start();\n\tfor (int64_t i = 0; i < n; ++i) {\n\t\tsum += i;\n\t}\n\truntime.stop();\n\n\tINFO(\"sum = \" , sum 
, \" [\" , runtime.elapsed().count() , \" ms ]\");\n}\n\nTEST_F(BasicsBenchmark, parallelSumIncorrect) {\n\tAux::Timer runtime;\n\n\tint64_t n = 1e+9;\n\tdouble sum = 0.0;\n\truntime.start();\n\t#pragma omp parallel for\n\tfor (int64_t i = 0; i < n; ++i) {\n\t\tsum += i;\n\t}\n\truntime.stop();\n\n\tINFO(\"sum = \" , sum , \" [\" , runtime.elapsed().count() , \" ms ]\");\n}\n\nTEST_F(BasicsBenchmark, parallelSumAtomicUpdate) {\n\tAux::Timer runtime;\n\n\tint64_t n = 1e+9;\n\tdouble sum = 0.0;\n\truntime.start();\n\t#pragma omp parallel for\n\tfor (int64_t i = 0; i < n; ++i) {\n\t\t#pragma omp atomic update\n\t\tsum += i;\n\t}\n\truntime.stop();\n\n\tINFO(\"sum = \" , sum , \" [\" , runtime.elapsed().count() , \" ms ]\");\n\n}\n\n\nTEST_F(BasicsBenchmark, parallelSumReduction) {\n\tAux::Timer runtime;\n\n\tint64_t n = 1e+9;\n\tdouble sum = 0.0;\n\truntime.start();\n\t#pragma omp parallel for reduction(+:sum)\n\tfor (int64_t i = 0; i < n; ++i) {\n\t\tsum += i;\n\t}\n\truntime.stop();\n\n\tINFO(\"sum = \" , sum , \" [\" , runtime.elapsed().count() , \" ms ]\");\n\n}\n\n//\n//TEST_F(BasicsBenchmark, parallelSumCritical) {\n//\tAux::Timer runtime;\n//\n//\tint64_t n = 1e+9;\n//\tdouble sum = 0.0;\n//\truntime.start();\n//\t#pragma omp parallel for\n//\tfor (int64_t i = 0; i < n; ++i) {\n//\t\t#pragma omp critical\n//\t\tsum += i;\n//\t}\n//\truntime.stop();\n//\n//\tINFO(\"sum = \" , sum , \" [\" , runtime.elapsed().count() , \" ms ]\");\n//\n//}\n\n\nTEST_F(BasicsBenchmark, seqVectorWrite) {\n\tAux::Timer runtime;\n\tint64_t n = 1e+8;\n\n\tstd::vector<int64_t> vec;\n\tvec.resize(n);\n\n\truntime.start();\n\tfor (int64_t i = 0; i < n; ++i) {\n\t\tvec[i] = i;\n\t}\n\truntime.stop();\n\n\tINFO(\"vector written in [\" , runtime.elapsed().count() , \" ms ]\");\n}\n\n\n\nTEST_F(BasicsBenchmark, parVectorWrite) {\n\tAux::Timer runtime;\n\tint64_t n = 1e+8;\n\n\tstd::vector<int64_t> vec;\n\tvec.resize(n);\n\n\truntime.start();\n\t#pragma omp parallel for\n\tfor (int64_t i = 0; i < n; ++i) {\n\t\tvec[i] = i;\n\t}\n\truntime.stop();\n\n\tINFO(\"vector written in [\" , runtime.elapsed().count() , \" ms ]\");\n}\n\n\n\n\nTEST_F(BasicsBenchmark, lambdaSummation_seq) {\n\tAux::Timer runtime;\n\tint64_t n = 1e+9;\n\tdouble sum = 0.0;\n\n\tauto func = [&](int64_t x) {\n\t\tsum += x;\n\t};\n\n\truntime.start();\n\tfor (int64_t i = 0; i < n; ++i) {\n\t\tfunc(i);\n\t}\n\truntime.stop();\n\n\tINFO(\"sum = \" , sum , \" [\" , runtime.elapsed().count() , \" ms ]\");\n}\n\nTEST_F(BasicsBenchmark, lambdaSummation_parWrong) {\n\tAux::Timer runtime;\n\tint64_t n = 1e+9;\n\tdouble sum = 0.0;\n\n\tauto func = [&](int64_t x) {\n\t\tsum += x;\n\t};\n\n\truntime.start();\n\t#pragma omp parallel for\n\tfor (int64_t i = 0; i < n; ++i) {\n\t\tfunc(i);\n\t}\n\truntime.stop();\n\n\tINFO(\"sum = \" , sum , \" [\" , runtime.elapsed().count() , \" ms ]\");\n}\n\n\nTEST_F(BasicsBenchmark, lambdaSummation_par_atomic) {\n\tAux::Timer runtime;\n\tint64_t n = 1e+9;\n\tdouble sum = 0.0;\n\n\tauto func = [&](int64_t x) {\n\t\t#pragma omp atomic update\n\t\tsum += x;\n\t};\n\n\truntime.start();\n\t#pragma omp parallel for\n\tfor (int64_t i = 0; i < n; ++i) {\n\t\tfunc(i);\n\t}\n\truntime.stop();\n\n\tINFO(\"sum = \" , sum , \" [\" , runtime.elapsed().count() , \" ms ]\");\n}\n\nTEST_F(BasicsBenchmark, lambdaSummation_par_reduction) {\n\tAux::Timer runtime;\n\tint64_t n = 1e+9;\n\tdouble sum = 0.0;\n\n\tauto func = [&](int64_t x) {\n\t\tsum += x;\n\t};\n\n\truntime.start();\n\t#pragma omp parallel for reduction(+:sum)\n\tfor (int64_t i = 0; 
i < n; ++i) {\n\t\tfunc(i);\n\t}\n\truntime.stop();\n\n\tINFO(\"sum = \" , sum , \" [\" , runtime.elapsed().count() , \" ms ]\");\n}\n\n\n\nTEST_F(BasicsBenchmark, lambdaVectorWrite_seq) {\n\tAux::Timer runtime;\n\tint64_t n = 1e+8;\n\n\tstd::vector<int64_t> vec;\n\tvec.resize(n);\n\n\tauto insert = [&](int64_t i, int64_t x) {\n\t\tvec[i] = x;\n\t};\n\n\truntime.start();\n\tfor (int64_t i = 0; i < n; ++i) {\n\t\tinsert(i, i);\n\t}\n\truntime.stop();\n\n\tINFO(\"vector written in [\" , runtime.elapsed().count() , \" ms ]\");\n}\n\nTEST_F(BasicsBenchmark, lambdaVectorWrite_par) {\n\tAux::Timer runtime;\n\tint64_t n = 1e+8;\n\n\tstd::vector<int64_t> vec;\n\tvec.resize(n);\n\n\tauto insert = [&](int64_t i, int64_t x) {\n\t\tvec[i] = x;\n\t};\n\n\truntime.start();\n\t#pragma omp parallel for\n\tfor (int64_t i = 0; i < n; ++i) {\n\t\tinsert(i, i);\n\t}\n\truntime.stop();\n\n\tINFO(\"vector written in [\" , runtime.elapsed().count() , \" ms ]\");\n}\n\n} /* namespace NetworKit */\n\n#endif /* NOGTEST */\n" }, { "alpha_fraction": 0.6741258502006531, "alphanum_fraction": 0.6867132782936096, "avg_line_length": 18.324323654174805, "blob_id": "40129d288d50081df4e143ee2f404c2597157bd8", "content_id": "8f2516b6c61377801b562841ee19cc94fc4f0df7", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 715, "license_type": "permissive", "max_line_length": 142, "num_lines": 37, "path": "/networkit/cpp/io/test/IOBenchmark.h", "repo_name": "networkitproject/networkit-mirror", "src_encoding": "UTF-8", "text": "/*\n * IOBenchmark.h\n *\n * Created on: 01.02.2013\n * Author: Christian Staudt ([email protected])\n */\n\n#ifndef NOGTEST\n\n#ifndef IOBENCHMARK_H_\n#define IOBENCHMARK_H_\n\n#include <gtest/gtest.h>\n#include <vector>\n#include <string>\n\n#include \"../../auxiliary/Log.h\"\n#include \"../../auxiliary/Timer.h\"\n#include \"../METISGraphReader.h\"\n\nusing std::vector;\nusing std::string;\n\nnamespace NetworKit {\n\nclass IOBenchmark: public testing::Test {\npublic:\n\tIOBenchmark() = default;\n\tvirtual ~IOBenchmark() = default;\n\n\tstatic void convertToHeatMap(vector<bool> &infected, vector<double> &xcoords, vector<double> &ycoords, string filename, double resolution=1);\n};\n\n} /* namespace NetworKit */\n#endif /* IOBENCHMARK_H_ */\n\n#endif /* NOGTEST */\n" }, { "alpha_fraction": 0.6817391514778137, "alphanum_fraction": 0.695652186870575, "avg_line_length": 16.96875, "blob_id": "28b45b0070558105415a5c354a7d23f67e6a00eb", "content_id": "acaf34e97d08fbdb5fea81b45570c1a3c4e56742", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 575, "license_type": "permissive", "max_line_length": 57, "num_lines": 32, "path": "/networkit/cpp/community/test/CommunityDetectionBenchmark.h", "repo_name": "networkitproject/networkit-mirror", "src_encoding": "UTF-8", "text": "/*\n * CommunityDetectionBenchmark.h\n *\n * Created on: 16.05.2014\n * Author: Klara Reichard ([email protected]), Marvin Ritter ([email protected])\n */\n\n#ifndef NOGTEST\n\n#ifndef COMMUNITY_DETECTION_BENCHMARK_H_\n#define COMMUNITY_DETECTION_BENCHMARK_H_\n\n#include <gtest/gtest.h>\n\n#include \"../../graph/Graph.h\"\n#include \"../../io/METISGraphReader.h\"\n\nnamespace NetworKit {\n\nclass CommunityDetectionBenchmark: public testing::Test {\npublic:\n\tvirtual void SetUp();\n\nprotected:\n\tMETISGraphReader metisReader;\t\n\n};\n\n} /* namespace NetworKit */\n#endif /* COMMUNITY_DETECTION_BENCHMARK_H_ */\n\n#endif /*NOGTEST 
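(the whole benchmark suite is compiled out when NOGTEST is defined) 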
*/\n" }, { "alpha_fraction": 0.7105262875556946, "alphanum_fraction": 0.7245613932609558, "avg_line_length": 17.387096405029297, "blob_id": "cfd8aae037e346e56698e9d2abcbb18b752455d1", "content_id": "3599681cbb5a42dd8828b4e00228a839de7b0bba", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 570, "license_type": "permissive", "max_line_length": 57, "num_lines": 31, "path": "/networkit/cpp/numerics/test/GaussSeidelRelaxationGTest.h", "repo_name": "networkitproject/networkit-mirror", "src_encoding": "UTF-8", "text": "/*\n * GaussSeidelRelaxationGTest.h\n *\n * Created on: 03.11.2014\n * Author: Michael\n */\n\n#ifndef NOGTEST\n\n#ifndef GAUSSSEIDELRELAXATIONGTEST_H_\n#define GAUSSSEIDELRELAXATIONGTEST_H_\n\n#include \"gtest/gtest.h\"\n\n#include \"../../algebraic/GraphMatrix.h\"\n#include \"../../algebraic/Vector.h\"\n#include \"../GaussSeidelRelaxation.h\"\n\nnamespace NetworKit {\n\nclass GaussSeidelRelaxationGTest : public testing::Test {\npublic:\n\tGaussSeidelRelaxationGTest() {}\n\t~GaussSeidelRelaxationGTest() {}\n};\n\n} /* namespace NetworKit */\n\n#endif /* GAUSSSEIDELRELAXATIONGTEST_H_ */\n\n#endif\n" }, { "alpha_fraction": 0.7161571979522705, "alphanum_fraction": 0.727802038192749, "avg_line_length": 19.81818199157715, "blob_id": "ad883a74291e254d923a15511fafb227602348a3", "content_id": "8cf04ec966aa4c49332d8a49497554cc8d179e5b", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 687, "license_type": "permissive", "max_line_length": 97, "num_lines": 33, "path": "/networkit/cpp/viz/MultilevelLayouter.h", "repo_name": "networkitproject/networkit-mirror", "src_encoding": "UTF-8", "text": "/*\n * MultilevelLayouter.h\n *\n * Created on: 27.01.2014\n * Author: Henning\n */\n\n#ifndef MULTILEVELLAYOUTER_H_\n#define MULTILEVELLAYOUTER_H_\n\n#include \"Layouter.h\"\n\nnamespace NetworKit {\n\n/**\n * @ingroup viz\n */\n// TODO: refactor to inherit from LayoutAlgorithm base class\nclass MultilevelLayouter: public NetworKit::Layouter {\nprotected:\n\tstatic const count N_THRSH;\n\npublic:\n\tMultilevelLayouter(Point<float> bottomLeft, Point<float> topRight, bool useGivenLayout = false);\n\n\tvirtual void draw(Graph& G);\n\tvirtual void drawInternal(Graph& G, count level);\n\n\tvirtual void prolongCoordinates(Graph& Gcon, Graph& G);\n};\n\n} /* namespace NetworKit */\n#endif /* MULTILEVELLAYOUTER_H_ */\n" }, { "alpha_fraction": 0.6823770403862, "alphanum_fraction": 0.7028688788414001, "avg_line_length": 20.217391967773438, "blob_id": "77f500364d4fd8e60e0a99fb26c83134d8f8f005", "content_id": "38d4f410916dfceb2718b9d748a8c96a7e7487d9", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 488, "license_type": "permissive", "max_line_length": 113, "num_lines": 23, "path": "/networkit/cpp/algebraic/DiagonalMatrix.h", "repo_name": "networkitproject/networkit-mirror", "src_encoding": "UTF-8", "text": "/*\n * DiagonalMatrix.h\n *\n * Created on: 13.11.2014\n * Author: Michael Wegner ([email protected])\n */\n\n#ifndef DIAGONALMATRIX_H_\n#define DIAGONALMATRIX_H_\n\n#include \"Matrix.h\"\n\nnamespace NetworKit {\n\nclass DiagonalMatrix : public Matrix {\npublic:\n\tDiagonalMatrix(const count dimension, const std::vector<double> &values);\n\tinline DiagonalMatrix(const count dimension) : DiagonalMatrix(dimension, std::vector<double>(dimension, 1.0)) {}\n};\n\n} /* namespace NetworKit */\n\n#endif /* DIAGONALMATRIX_H_ */\n" }, { 
"alpha_fraction": 0.6360239386558533, "alphanum_fraction": 0.6442451477050781, "avg_line_length": 23.869888305664062, "blob_id": "a9a151051936a1db4d7ae84950f22e27e04d75f6", "content_id": "21cceab6acc15c1a4384a1cd3970bab204c8028f", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 6690, "license_type": "permissive", "max_line_length": 131, "num_lines": 269, "path": "/networkit/cpp/algebraic/Matrix.cpp", "repo_name": "networkitproject/networkit-mirror", "src_encoding": "UTF-8", "text": "/*\n * Matrix.cpp\n *\n * Created on: 13.03.2014\n * Author: Michael Wegner ([email protected])\n */\n\n#include \"Matrix.h\"\n\nnamespace NetworKit {\n\nMatrix::Matrix() : graph(0, true, true), nRows(0), nCols(0) {\n}\n\nMatrix::Matrix(const count dimension) : graph(dimension, true, true), nRows(dimension), nCols(dimension) {\n}\n\nMatrix::Matrix(const count nRows, const count nCols) : graph(std::max(nRows, nCols), true, true), nRows(nRows), nCols(nCols) {\n}\n\nMatrix::Matrix(const count dimension, const std::vector<std::pair<index, index>> &positions,\n\t\t\t\t\tconst std::vector<double> &values) : graph(dimension, true, true), nRows(dimension), nCols(dimension) {\n\tassert(positions.size() == values.size());\n\n\tfor (size_t k = 0; k < positions.size(); ++k) {\n\t\tassert(positions[k].first >= 0 && positions[k].second >= 0 && positions[k].first < dimension && positions[k].second < dimension);\n\n\t\tstd::pair<node, node> pos = positions[k];\n\t\tgraph.addEdge(pos.first, pos.second, values[k]);\n\t}\n}\n\nMatrix::Matrix(const count nRows, const count nCols, const std::vector<std::pair<index, index>> &positions,\n\t\t\t\t\tconst std::vector<double> &values) : graph(std::max(nRows, nCols), true, true), nRows(nRows), nCols(nCols) {\n\tassert(positions.size() == values.size());\n\n\tfor (size_t k = 0; k < positions.size(); ++k) {\n\t\tassert(positions[k].first >= 0 && positions[k].second >= 0 && positions[k].first < nRows && positions[k].second < nCols);\n\n\t\tstd::pair<node, node> pos = positions[k];\n\t\tgraph.addEdge(pos.first, pos.second, values[k]);\n\t}\n}\n\nMatrix::Matrix(const std::vector<Vector> &rows) {\n\tassert(rows.size() > 0);\n\n\tnRows = rows.size();\n\tnCols = rows[0].getDimension();\n\tgraph = Graph(std::max(nRows, nCols), true, true);\n\n#pragma omp parallel for\n\tfor (size_t i = 0; i < nRows; ++i) {\n\t\tif (rows[i].getDimension() != nCols) {\n\t\t\tthrow std::runtime_error(\"Matrix(const std::vector<Vector> &rows): Column dimensions of one or more rows do not match\");\n\t\t}\n\t}\n\n\tfor (index i = 0; i < nRows; ++i) {\n\t\tfor (index j = 0; j < nCols; ++j) {\n\t\t\tdouble value = rows[i][j];\n\t\t\tif (value != 0.0) { // do not store 0 values\n\t\t\t\tgraph.addEdge(i, j, value);\n\t\t\t}\n\t\t}\n\t}\n}\n\ncount Matrix::nnzInRow(const index i) const {\n\tassert(i >= 0 && i < nRows);\n\treturn graph.degree(i);\n}\n\ncount Matrix::nnz() const {\n\tcount nnz = 0;\n\tfor (index i = 0; i < nRows; ++i) {\n\t\tnnz += nnzInRow(i);\n\t}\n\n\treturn nnz;\n}\n\ndouble Matrix::operator()(const index i, const index j) const {\n\tassert(i >= 0 && i < nRows);\n\tassert(j >= 0 && j < nCols);\n\n\treturn graph.weight(i,j);\n}\n\nvoid Matrix::setValue(const index i, const index j, const double value) {\n\tassert(i >= 0 && i < nRows);\n\tassert(j >= 0 && j < nCols);\n\n\tgraph.setWeight(i, j, value);\n}\n\nVector Matrix::row(const index i) const {\n\tassert(i >= 0 && i < nRows);\n\n\tVector row(numberOfColumns(), 0.0, 
true);\n\tgraph.forEdgesOf(i, [&](node i, node j, double value) {\n\t\trow[j] = value;\n\t});\n\n\treturn row;\n}\n\nVector Matrix::column(const index j) const {\n\tassert(j >= 0 && j < nCols);\n\n\tVector column(numberOfRows());\n#pragma omp parallel for\n\tfor (node i = 0; i < numberOfRows(); ++i) {\n\t\tcolumn[i] = graph.weight(i,j);\n\t}\n\n\treturn column;\n}\n\nVector Matrix::diagonal() const {\n\tVector diag(std::min(nRows, nCols), 0);\n\tfor (index i = 0; i < diag.getDimension(); ++i) {\n\t\tdiag[i] = (*this)(i,i);\n\t}\n\n\treturn diag;\n}\n\nMatrix Matrix::operator+(const Matrix &other) const {\n\treturn Matrix(*this) += other;\n}\n\nMatrix& Matrix::operator+=(const Matrix &other) {\n\tassert(nRows == other.nRows && nCols == other.nCols);\n\n\tother.forNonZeroElementsInRowOrder([&](node i, node j, double value) {\n\t\tgraph.increaseWeight(i, j, value);\n\t});\n\n\treturn *this;\n}\n\nMatrix Matrix::operator-(const Matrix &other) const {\n\treturn Matrix(*this) -= other;\n}\n\nMatrix& Matrix::operator-=(const Matrix &other) {\n\tassert(nRows == other.nRows && nCols == other.nCols);\n\n\tother.forNonZeroElementsInRowOrder([&](node i, node j, double value) {\n\t\tgraph.increaseWeight(i, j, -value);\n\t});\n\n\treturn *this;\n}\n\nMatrix Matrix::operator*(const double scalar) const {\n\treturn Matrix(*this) *= scalar;\n}\n\nMatrix& Matrix::operator*=(const double scalar) {\n\tgraph.parallelForEdges([&](node i, node j, double value) {\n\t\tgraph.setWeight(i, j, value * scalar);\n\t});\n\n\treturn *this;\n}\n\nVector Matrix::operator*(const Vector &vector) const {\n\tassert(!vector.isTransposed());\n\tassert(nCols == vector.getDimension());\n\tVector result(numberOfRows(), 0.0);\n\n\tparallelForNonZeroElementsInRowOrder([&](node i, node j, double value) {\n\t\tresult[i] += value * vector[j];\n\t});\n\n\treturn result;\n}\n\nMatrix Matrix::operator*(const Matrix &other) const {\n\tassert(nCols == other.nRows);\n\n\tMatrix result(numberOfRows(), other.numberOfColumns());\n\tSparseAccumulator spa(numberOfRows());\n\tfor (index r = 0; r < numberOfRows(); ++r) {\n\t\tgraph.forNeighborsOf(r, [&](node v, double w1){\n\t\t\tother.graph.forNeighborsOf(v, [&](node u, double w2){\n\t\t\t\tdouble value = w1 * w2;\n\t\t\t\tspa.scatter(value, u);\n\t\t\t});\n\t\t});\n\n\t\tspa.gather([&](node row, node column, double value){\n\t\t\tresult.graph.addEdge(row, column, value);\n\t\t});\n\n\t\tspa.increaseRow();\n\t}\n\n\treturn result;\n}\n\nMatrix Matrix::operator/(const double divisor) const {\n\treturn Matrix(*this) /= divisor;\n}\n\nMatrix& Matrix::operator/=(const double divisor) {\n\treturn *this *= 1 / divisor;\n}\n\nMatrix Matrix::mTmMultiply(const Matrix &A, const Matrix &B) {\n\tassert(A.nRows == B.nRows);\n\n\tMatrix C(A.numberOfColumns(), B.numberOfColumns());\n\tfor (index k = 0; k < A.numberOfRows(); ++k) {\n\t\tA.graph.forNeighborsOf(k, [&](index i, edgeweight wA) {\n\t\t\tB.graph.forNeighborsOf(k, [&](index j, edgeweight wB) {\n\t\t\t\tC.graph.increaseWeight(i, j, wA * wB);\n\t\t\t});\n\t\t});\n\t}\n\n\treturn C;\n}\n\nMatrix Matrix::mmTMultiply(const Matrix &A, const Matrix &B) {\n\tassert(A.nCols == B.nCols);\n\n\tMatrix C(A.numberOfRows(), B.numberOfRows());\n\tfor (index i = 0; i < A.numberOfRows(); ++i) {\n\t\tA.graph.forNeighborsOf(i, [&](index k, edgeweight wA){\n\t\t\tfor (index j = 0; j < B.numberOfRows(); ++j) {\n\t\t\t\tedgeweight wB = B(j,k);\n\t\t\t\tif (wB != 0.0) {\n\t\t\t\t\tC.graph.increaseWeight(i, j, wA * wB);\n\t\t\t\t}\n\t\t\t}\n\t\t});\n\t}\n\n\treturn 
C;\n}\n\nVector Matrix::mTvMultiply(const Matrix &matrix, const Vector &vector) {\n\tassert(matrix.nRows == vector.getDimension());\n\n\tVector result(matrix.numberOfColumns(), 0.0);\n\tfor (index k = 0; k < matrix.numberOfRows(); ++k) {\n\t\tmatrix.graph.forNeighborsOf(k, [&](index j, edgeweight w){\n\t\t\tresult[j] += w * vector[k];\n\t\t});\n\t}\n\n\treturn result;\n}\n\nMatrix Matrix::transpose() const {\n\tMatrix transposedMatrix(numberOfColumns(), numberOfRows());\n\tparallelForNonZeroElementsInRowOrder([&](index i, index j, edgeweight weight){\n\t\ttransposedMatrix.graph.addEdge(i,j,weight);\n\t});\n\n\treturn transposedMatrix;\n}\n\n\n\n} /* namespace NetworKit */\n" }, { "alpha_fraction": 0.6664931178092957, "alphanum_fraction": 0.672481119632721, "avg_line_length": 32.39130401611328, "blob_id": "38092ce2f05e8f0ee71da6b6b0aae92a8e66b1a2", "content_id": "d2a333413b61dbbaf5f80a60d84695afd4ffb8b2", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3841, "license_type": "permissive", "max_line_length": 147, "num_lines": 115, "path": "/setup_util.py", "repo_name": "networkitproject/networkit-mirror", "src_encoding": "UTF-8", "text": "import os\nimport subprocess\nfrom subprocess import DEVNULL\n\ndef collectExternalPackageStatus():\n\t\"\"\" This function is supposed to check if the packages, NetworKit uses are installed or not.\n\t\tIf a package is not installed, an appropriate message is created.\n\t\"\"\"\n\twarnMessages = []\n\ttry:\n\t\timport scipy\n\t\tdel scipy\n\texcept:\n\t\twarnMessages.append(\"WARNING: SciPy is not installed; to use all of networkit, please install SciPy\")\n\ttry:\n\t\timport numpy\n\t\tdel numpy\n\texcept:\n\t\twarnMessages.append(\"WARNING: numpy is not installed; to use all of networkit, please install numpy\")\n\n\ttry:\n\t\timport readline\n\t\tdel readline\n\texcept:\n\t\twarnMessages.append(\"WARNING: readline is not installed; to use all of networkit, please install readline\")\n\n\ttry:\n\t\timport matplotlib\n\t\tdel matplotlib\n\texcept:\n\t\twarnMessages.append(\"WARNING: matplotlib is not installed; to use all of networkit, please install matplotlib\")\n\n\ttry:\n\t\timport networkx\n\t\tdel networkx\n\texcept:\n\t\twarnMessages.append(\"WARNING: networkx is not installed; to use all of networkit, please install networkx\")\n\n\ttry:\n\t\timport tabulate\n\t\tdel tabulate\n\texcept:\n\t\twarnMessages.append(\"WARNING: tabulate is not installed; to use all of networkit, please install tabulate\")\n\treturn warnMessages\n\ndef determineCompiler(candidates, stdFlags):\n\t\"\"\" This function tests a list of candidates, whether they are sufficient to the requirements of \n\t\tNetworKit and focuses on C++11 and OpenMP support.\"\"\"\n\t#prepare sample.cpp file necessary to determine gcc\n\t#TODO: proper c++11 test?\n\t#TODO: generalize variable names to \"compiler\" instead of \"gcc\"...\n\tsample = open(\"sample.cpp\", \"w\")\n\tsample.write(\"\"\"/*****************************************************************************\n\t* File: sample.cpp\n\t* DESCRIPTION:\n\t* OpenMP Example - Hello World - C/C++ Version\n\t* In this simple example, the master thread forks a parallel region.\n\t* All threads in the team obtain their unique thread number and print it.\n\t* The master thread only prints the total number of threads. 
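The [[deprecated]] helloWorld wrapper further down is presumably\n\t* there to exercise the attribute syntax of newer C++ standards as well. 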
Two OpenMP\n\t* library routines are used to obtain the number of threads and each\n\t* thread's number.\n\t* AUTHOR: Blaise Barney 5/99\n\t* LAST REVISED: 04/06/05\n\t******************************************************************************/\n\t#include <omp.h>\n\t#include <iostream>\n\n\t[[deprecated(\"use the function body directly instead of wrapping it in a function.\")]]\n\tvoid helloWorld() {\n\t\tstd::cout << \"Hello world\" << std::endl;\n\t}\n\n\tint main (int argc, char *argv[]) {\n\t\thelloWorld();\n\t\tint nthreads, tid;\n\t\t/* Fork a team of threads giving them their own copies of variables */\n\t\t#pragma omp parallel private(nthreads, tid)\n\t\t{\n\t\t\t/* Obtain thread number */\n\t\t\ttid = omp_get_thread_num();\n\t\t\tstd::cout << \\\"Hello World from thread = \\\" << tid << std::endl;\n\t\t\t/* Only master thread does this */\n\t\t\tif (tid == 0) {\n\t\t\t\tnthreads = omp_get_num_threads();\n\t\t\t\tstd::cout << \\\"Number of threads = \\\" << nthreads << std::endl;\n\t\t\t}\n\t\t} /* All threads join master thread and disband */\n\t}\"\"\")\n\tsample.close()\n\n\tcompiler_version_satisfied = False\n\tcompiler = None\n\tstdflag = None\n\tv = 0\n\ti = 0\n\twhile not compiler_version_satisfied and i < len(stdFlags):\n\t\twhile not compiler_version_satisfied and v < len(candidates):\n\t\t\t#print(\"testing\\t{}\".format(candidates[v]))\n\t\t\ttry:\n\t\t\t\tif subprocess.call([candidates[v],\"-o\",\"test_build\",\"-std={}\".format(stdFlags[i]),\"-fopenmp\",\"sample.cpp\"],stdout=DEVNULL,stderr=DEVNULL) == 0:\n\t\t\t\t\tcompiler_version_satisfied = True\n\t\t\t\t\tcompiler = candidates[v]\n\t\t\t\t\tstdflag = stdFlags[i]\n\t\t\t\t\t#print(\"using {0} as C++ compiler with the {1} STD flag\".format(candidates[v],stdFlags[i]))\n\t\t\texcept:\n\t\t\t\t#print(\"{0} is not installed\".format(candidates[v]))\n\t\t\t\tpass\n\t\t\tv += 1\n\t\ti += 1\n\t\tv = 0\n\n\tos.remove(\"sample.cpp\")\n\tif compiler_version_satisfied:\n\t\tos.remove(\"test_build\")\n\treturn compiler, stdflag\n\n" }, { "alpha_fraction": 0.6712062358856201, "alphanum_fraction": 0.6867704391479492, "avg_line_length": 16.133333206176758, "blob_id": "cbd7d1fb31e68655db957ac0cc8eb2ce1c4799d6", "content_id": "3b75c64ab9395830bb2aa7381e51d76ec6583ef2", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 514, "license_type": "permissive", "max_line_length": 48, "num_lines": 30, "path": "/networkit/cpp/graph/test/GraphDistanceGTest.h", "repo_name": "networkitproject/networkit-mirror", "src_encoding": "UTF-8", "text": "/*\n * GraphDistanceGTest.h\n *\n * Created on: 23.07.2013\n * Author: Henning Meyerhenke ([email protected])\n */\n\n#ifndef NOGTEST\n\n#ifndef GRAPHDISTANCEGTEST_H_\n#define GRAPHDISTANCEGTEST_H_\n\n#include <gtest/gtest.h>\n\n#include \"../GraphDistance.h\"\n#include \"../Graph.h\"\n#include \"../../io/METISGraphReader.h\"\n\nnamespace NetworKit {\n\nclass GraphDistanceGTest: public testing::Test {\npublic:\n\tGraphDistanceGTest();\n\tvirtual ~GraphDistanceGTest();\n};\n\n} /* namespace NetworKit */\n#endif /* GRAPHDISTANCEGTEST_H_ */\n\n#endif /*NOGTEST */\n" }, { "alpha_fraction": 0.6272401213645935, "alphanum_fraction": 0.6559139490127563, "avg_line_length": 12.949999809265137, "blob_id": "b2d2d2e3cba9c4d062ef62bc00ae53c838f1b34b", "content_id": "e48f945de5e03d297cf60719b660898b96151dfa", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 279, "license_type": "permissive", 
"max_line_length": 41, "num_lines": 20, "path": "/networkit/cpp/clique/test/CliqueGTest.h", "repo_name": "networkitproject/networkit-mirror", "src_encoding": "UTF-8", "text": "/*\n * CliqueGTest.h\n *\n * Created on: 08.12.2014\n * Author: henningm\n */\n\n#ifndef CLIQUEGTEST_H_\n#define CLIQUEGTEST_H_\n\n#include <gtest/gtest.h>\n\nnamespace NetworKit {\n\nclass CliqueGTest: public testing::Test {\n};\n\n} /* namespace NetworKit */\n\n#endif /* CLIQUEGTEST_H_ */\n" }, { "alpha_fraction": 0.7705152630805969, "alphanum_fraction": 0.7705152630805969, "avg_line_length": 35.13793182373047, "blob_id": "6ec51c6b61b52ba42c2052e49dc4ab0a9a582053", "content_id": "0d6154f230956063a755f1b9d2927409bd2b7fda", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 2096, "license_type": "permissive", "max_line_length": 217, "num_lines": 58, "path": "/Doc/doc/api/notebooks.rst", "repo_name": "networkitproject/networkit-mirror", "src_encoding": "UTF-8", "text": "Jupyter Notebook\n================\n\nNetworKit is designed to provide a high level of interactivity to give the user the possibility to create own workflows and a network analysis tool which is easy to use.\nThis is accomplished by providing a Python interface to use NetworKit in an interactive Python shell. Complete workflows can be created with Jupyter Notebook - a web-based interactive computing environment.\n\nTutorial Notebooks\n-----------------\n\nWe provide several example notebooks to get started with NetworKit and Jupyter Notebook. Please note that the following links are static renderings of the notebooks illustrating NetworKit's features and their outcome.\n\n|userGuide|\n\n|gephiStreaming|\n\n|spectralCentrality|\n\n|spectralCentralityPandas|\n\n|spectralColoring|\n\n|spectralDrawing|\n\n|spectralPartitioning|\n\n.. |userGuide| raw:: html\n\n\t<a href=\"http://nbviewer.ipython.org/urls/networkit.iti.kit.edu/uploads/docs/NetworKit_UserGuide.ipynb\" target=\"_blank\">NetworKit User Guide</a>\n\n\n.. |gephiStreaming| raw:: html\n\n\t<a href=\"http://nbviewer.ipython.org/urls/networkit.iti.kit.edu/data/uploads/GephiStreaming_UserGuide.ipynb\" target=\"_blank\">Gephi Streaming</a>\n\n\n.. |spectralCentrality| raw:: html\n\n\t<a href=\"http://nbviewer.ipython.org/urls/networkit.iti.kit.edu/data/uploads/SpectralCentrality.ipynb\" target=\"_blank\">Spectral Centrality</a>\n\n\n.. |spectralCentralityPandas| raw:: html\n\n\t<a href=\"http://nbviewer.ipython.org/urls/networkit.iti.kit.edu/data/uploads/SpectralCentralityWithPandas.ipynb\" target=\"_blank\">Spectral Centrality with Pandas</a>\n\n\n.. |spectralColoring| raw:: html\n\n\t<a href=\"http://nbviewer.ipython.org/urls/networkit.iti.kit.edu/data/uploads/SpectralColoring.ipynb\" target=\"_blank\">Spectral Coloring</a>\n\n\n.. |spectralDrawing| raw:: html\n\n\t<a href=\"http://nbviewer.ipython.org/urls/networkit.iti.kit.edu/data/uploads/SpectralDrawing.ipynb\" target=\"_blank\">Spectral Drawing</a>\n\n\n.. 
|spectralPartitioning| raw:: html\n\n\t<a href=\"http://nbviewer.ipython.org/urls/networkit.iti.kit.edu/data/uploads/SpectralPartitioning.ipynb\" target=\"_blank\">Spectral Partitioning</a>\n" }, { "alpha_fraction": 0.6424015164375305, "alphanum_fraction": 0.6702626347541809, "avg_line_length": 53.66666793823242, "blob_id": "7d8d5d9d40c8c54176cdced4d6b3d2f8db482699", "content_id": "92651188a6fe1c0022909ac798fec19e4638d19f", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 10660, "license_type": "permissive", "max_line_length": 733, "num_lines": 195, "path": "/Doc/doc/index.rst", "repo_name": "networkitproject/networkit-mirror", "src_encoding": "UTF-8", "text": ".. title:: NetworKit\n\n.. toctree::\n :hidden:\n :maxdepth: 2\n\n Developer Guide <api/DevGuide>\n Jupyter Notebook <api/notebooks>\n Python Documentation <api/modules>\n C++ Documentation <api/cppdoc>\n\n.. raw:: html\n\n <section class=\"Top_Section\" style=\"clear: both; border-bottom: 1px solid #d4d7d9;\">\n <div class=\"Top_Section\" style=\"padding-top: 30px; padding-bottom: 30px\">\n <div class=\"Introduction_Text\" style=\"border-right: 1px solid #d4d7d9; display: table-cell; width: 66.66%; padding-right: 30px; text-align: justify\">\n\n**NetworKit** is a growing open-source toolkit for large-scale network analysis. Its aim is to provide tools for the analysis of large networks in the size range from thousands to billions of edges. For this purpose, it implements efficient graph algorithms, many of them parallel to utilize multicore architectures. These are meant to compute standard measures of network analysis, such as degree sequences, clustering coefficients, and centrality measures. In this respect, NetworKit is comparable to packages such as NetworkX, albeit with a focus on parallelism and scalability. NetworKit is also a testbed for algorithm engineering and contains novel algorithms from recently published research (see list of :ref:`publications`).\n\n**NetworKit** is a Python module. Performance-aware algorithms are written in C++ (often using OpenMP for shared-memory parallelism) and exposed to Python via the Cython toolchain. Python in turn gives us the ability to work interactively and with a rich environment of tools for data analysis. Furthermore, NetworKit's core can be built and used as a native library.\n\n.. raw:: html\n\n <p style=\"text-align: left; font-size:14pt; padding-top: 15px;\">Latest News</p>\n <div style=\"float: left; display: table-cell; width: 80%; padding-right: 30px\">\n\n:ref:`news-1`\n\n.. 
raw:: html\n\n </div>\n\n <div style=\"display: table-cell; width: 20%; padding-left: 30px\">\n <p style=\"word-break: normal\">\n <a href=\"news.html\">All News</a>\n </p>\n </div>\n\n </div>\n <div class=\"Downloads\" style=\"display: table-cell; width: 33.33%; padding-left: 30px\">\n <div>Clone from Mercurial</div>\n <span style=\"display: block;overflow: hidden;\"><input onClick=\"this.setSelectionRange(0, this.value.length)\" style=\"width: 100%\" type=\"text\" value=\"hg clone https://algohub.iti.kit.edu/parco/NetworKit/NetworKit\" readonly=\"\"/></span>\n\n <div style=\"padding-top: 15px\">Install via pip</div>\n <span style=\"display: block;overflow: hidden;\"><input onClick=\"this.setSelectionRange(0, this.value.length)\" style=\"width: 100%\" type=\"text\" value=\"pip install networkit\" readonly=\"\"/></span>\n\n <div style=\"padding-top: 15px\">Download from <a href=\"https://algohub.iti.kit.edu/parco/NetworKit/NetworKit\">Algohub</a> or as <a href=\"https://networkit.iti.kit.edu/uploads/NetworKit.zip\">Zip file</a></div>\n\n <div style=\"padding-top: 15px\">Download the <a href=\"https://networkit.iti.kit.edu/uploads/Documentation.zip\">Class Documentation</a></div>\n\n <div style=\"padding-top: 15px\">Download the <a href=\"http://arxiv.org/pdf/1403.3005v3.pdf\">Technical Report</a></div>\n\n <div style=\"padding-top: 15px;\"> <div style=\"float: left;\">Mailing List</div> <div><a style=\"padding-left: 10px\" href=\"https://lists.ira.uni-karlsruhe.de/mailman/listinfo/networkit\"><img style=\"padding-bottom:2px\" src=\"_static/mailinglist.png\"></a> </div> </div>\n\n <div style=\"padding-top: 15px\">View the <a href=\"https://lists.ira.uni-karlsruhe.de/pipermail/networkit/\">mailing list archive</a></div>\n\n <div style=\"padding-top: 15px\"><a href=\"http://nbviewer.ipython.org/urls/networkit.iti.kit.edu/uploads/docs/NetworKit_UserGuide.ipynb\">NetworKit UserGuide</a></div>\n\n </div>\n </div>\n </section>\n\n <section class=\"MainFeatures\" style=\"clear: both; padding-top: 20px; padding-bottom: 0px;\">\n <div class=\"FeatureTable\" >\n <div style=\"text-align: center; font-size:16pt; font-weight: bold; padding-bottom: 20px;\">Main Design Goals</div>\n <div style=\"border-right: 1px solid #d4d7d9; display: table-cell; width: 33.33%; padding: 20px; padding-bottom: 0px;\">\n <p style=\"text-align: center; font-size:14pt\">Interactive Workflow</p>\n <p style=\"word-break: normal; text-align:justify;\">\n NetworKit takes inspiration from other software like R, MATLAB or Mathematica and provides an interactive shell via Python. This allows users to\n freely combine functions from NetworKit and also use the results with other popular Python packages. In combination with Jupyter Notebook, NetworKit\n provides an intuitive computing environment for scientific workflows, even on a remote compute server.\n </p>\n </div>\n\n <div style=\"border-right: 1px solid #d4d7d9; display: table-cell; width: 33.33%; padding: 20px; padding-bottom: 0px;\">\n <p style=\"text-align: center; font-size:14pt\">High Performance</p>\n <p style=\"word-break: normal; text-align:justify;\">\n In NetworKit, algorithms and data structures are selected and implemented with a combination of good software engineering as well as high performance and parallelism in mind. Some implementations are\n among the fastest in published research. 
For example, community detection in a 3 billion edge web graph can be performed on a 16-core server\n in a matter of a few minutes.\n </p>\n </div>\n\n <div style=\"display: table-cell; width: 33.33%; padding: 20px; padding-bottom: 0px;\">\n <p style=\"text-align: center; font-size:14pt\">Easy Integration</p>\n <p style=\"word-break: normal; text-align:justify;\">\n As a Python module, NetworKit enables seamless integration with Python libraries for scientific computing and data analysis, e.g. pandas for data framework\n processing and analytics, matplotlib for plotting, networkx for additional network analysis tasks, or numpy and scipy for numerical and scientific computing.\n Furthermore, NetworKit aims to support a variety of input/output formats.\n </p>\n </div>\n </div>\n </section>\n\n <section class=\"FeatureImages\" style=\"clear: both; padding-top: 0px; padding-bottom: 0px;\">\n <div class=\"FeatureTable\" >\n <div style=\"border-right: 1px solid #d4d7d9; display: table-cell; width: 33.33%; padding: 20px; padding-bottom: 0px;\">\n <div style=\"border-top: 1px solid #d4d7d9; margin-left: 40px; margin-right: 40px; padding-bottom: 30px;\"></div>\n\n.. code-block:: python\n\n from networkit import *\n G = readGraph(\"skitter.graph\", Format.METIS)\n print(G.toString())\n\n.. raw:: html\n\n <pre class=\"codeSpan\">'Graph(name=skitter, n=1696415, m=11095298)'</pre>\n\n.. code-block:: python\n\n cc = components.ConnectedComponents(G)\n cc.run()\n compSizes = cc.getComponentSizes()\n numCC = len(compSizes)\n maxCC = max(compSizes.values())\n print(\"#cc = %d,largest = %d\"%(numCC,maxCC))\n\n.. raw:: html\n\n <pre class=\"codeSpan\">#cc = 756,largest = 1694616</pre>\n </div>\n\n <div style=\"border-right: 1px solid #d4d7d9; display: table-cell; width: 33.33%; padding: 20px; padding-bottom: 0px;\">\n <div style=\"border-top: 1px solid #d4d7d9; margin-left: 40px; margin-right: 40px; padding-bottom: 30px;\"></div>\n\n.. code-block:: python\n\n communities = community.detectCommunities(G)\n\n.. raw:: html\n\n <pre class=\"codeSpan\" style=\"padding: 8px;\">\n PLM(balanced,pc) detected communities in 17.86 [s]\n solution properties:\n ------------------- -------------\n # communities 1637\n min community size 2\n max community size 233061\n avg. community size 1036.3\n modularity 0.825245\n ------------------- -------------\n </pre>\n </div>\n\n <div style=\"display: table-cell; width: 33.33%; padding: 20px; padding-bottom: 0px;\">\n <div style=\"border-top: 1px solid #d4d7d9; margin-left: 40px; margin-right: 40px; padding-bottom: 30px;\"></div>\n\n.. code-block:: python\n\n %matplotlib inline\n import matplotlib.pyplot as plt\n sizes = communities.subsetSizes()\n sizes.sort(reverse=True)\n plt.xscale(\"log\")\n plt.xlabel(\"community id\")\n plt.yscale(\"log\")\n plt.ylabel(\"size\")\n plt.show()\n\n.. raw:: html\n\n </div>\n </div>\n </section>\n\n <section class=\"ExampleTexts\" style=\"clear: both; padding-top: 0px; padding-bottom: 20px;\">\n <div class=\"FeatureTable\" >\n <div style=\"border-right: 1px solid #d4d7d9; display: table-cell; width: 33.33%; padding: 20px; padding-bottom: 0px;\">\n <div style=\"border-top: 1px solid #d4d7d9; margin-left: 40px; margin-right: 40px; padding-bottom: 30px;\"></div>\n <p style=\"word-break: normal; text-align:justify;\">\n Using NetworKit is as simple as importing the networkit Python package. In the example above, we then read a network of autonomous\n systems from disk and print some very basic statistics about the network. 
We go on by computing the connected components and outputting their number\n and size.\n </p>\n </div>\n\n <div style=\"border-right: 1px solid #d4d7d9; display: table-cell; width: 33.33%; padding: 20px; padding-bottom: 0px;\">\n <div style=\"border-top: 1px solid #d4d7d9; margin-left: 40px; margin-right: 40px; padding-bottom: 30px;\"></div>\n <p style=\"word-break: normal; text-align:justify;\">\n Continuing with the example on the left, we tell NetworKit to detect communities for the <i>skitter</i> network. Thanks to our parallel\n modularity-driven community detection algorithms, this takes only about 18 seconds on a consumer notebook even though the network has more than 11 million edges.\n </p>\n </div>\n\n <div style=\"display: table-cell; width: 33.33%; padding: 20px; padding-bottom: 0px;\">\n <div style=\"border-top: 1px solid #d4d7d9; margin-left: 40px; margin-right: 40px; padding-bottom: 30px;\"></div>\n <p style=\"word-break: normal; text-align:justify;\">\n Visualizing the size of the communities computed in the example in the middle is very easy due to the seamless integration of NetworKit into\n the Python ecosystem. We use matplotlib to plot a log-log graph of the community sizes sorted in descending order. When using Jupyter\n Notebook the resulting plot appears directly below the plot command.\n </p>\n </div>\n </div>\n </section>\n" }, { "alpha_fraction": 0.6415441036224365, "alphanum_fraction": 0.65625, "avg_line_length": 14.11111068725586, "blob_id": "e60582afe4c95aea2954b446d3a0818f5c4e4eb7", "content_id": "ba0a9557d716a17721b839a1550d452ae6c629d3", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 544, "license_type": "permissive", "max_line_length": 42, "num_lines": 36, "path": "/networkit/cpp/algebraic/test/MatrixGTest.h", "repo_name": "networkitproject/networkit-mirror", "src_encoding": "UTF-8", "text": "/*\n * MatrixGTest.h\n *\n * Created on: 16.03.2014\n * Author: Michael Wegner ([email protected])\n */\n\n#ifndef NOGTEST\n\n#ifndef MATRIXGTEST_H_\n#define MATRIXGTEST_H_\n\n#include \"gtest/gtest.h\"\n#include \"../Matrix.h\"\n#include \"../../graph/Graph.h\"\n#include \"../../io/METISGraphReader.h\"\n#include \"../AdjacencyMatrix.h\"\n#include <math.h>\n#include <vector>\n#include <utility>\n\n\nnamespace NetworKit {\n\nclass MatrixGTest : public testing::Test {\npublic:\n\tMatrixGTest();\n\tvirtual ~MatrixGTest();\n};\n\n\n} /* namespace NetworKit */\n\n#endif /* MATRIXGTEST_H_ */\n\n#endif\n" }, { "alpha_fraction": 0.6639150977134705, "alphanum_fraction": 0.6721698045730591, "avg_line_length": 18.272727966308594, "blob_id": "506669d860dbd8d3a70fe4ccb3fcf33d2f83f3ce", "content_id": "896a304b6989f7422dac2f440f05deb1bf10d212", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 848, "license_type": "permissive", "max_line_length": 104, "num_lines": 44, "path": "/networkit/cpp/graph/Dijkstra.h", "repo_name": "networkitproject/networkit-mirror", "src_encoding": "UTF-8", "text": "/*\n * Dijkstra.h\n *\n * Created on: Jul 23, 2013\n * Author: Henning, Christian Staudt\n */\n\n#ifndef DIJKSTRA_H_\n#define DIJKSTRA_H_\n\n#include \"Graph.h\"\n#include \"SSSP.h\"\n#include \"../auxiliary/PrioQueue.h\"\n\nnamespace NetworKit {\n\n/**\n * @ingroup graph\n * Dijkstra's SSSP algorithm.\n */\nclass Dijkstra : public SSSP {\n\nfriend class DynDijkstra;\nfriend class DynDijkstra2;\n\npublic:\n\n\t/**\n\t * Creates the Dijkstra class for @a G and the source 
node @a source.\n\t *\n\t * @param G The graph.\n\t * @param source The source node.\n\t * @param storePaths\tstore paths and number of paths?\n\t */\n\tDijkstra(const Graph& G, node source, bool storePaths=true, bool storeStack=false, node target = none);\n\n\t/**\n\t * Performs the Dijkstra SSSP algorithm on the graph given in the constructor.\n\t */\n\tvirtual void run();\n};\n\n} /* namespace NetworKit */\n#endif /* DIJKSTRA_H_ */\n" }, { "alpha_fraction": 0.6401383876800537, "alphanum_fraction": 0.6678200960159302, "avg_line_length": 13.449999809265137, "blob_id": "3eadd18c12b8e524b4e307bac6914b76a675d626", "content_id": "58f59815e73c7792bc63fd4b852225b5b86c121e", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 289, "license_type": "permissive", "max_line_length": 44, "num_lines": 20, "path": "/networkit/cpp/community/test/CommunityGTest.h", "repo_name": "networkitproject/networkit-mirror", "src_encoding": "UTF-8", "text": "/*\n * CommunityGTest.h\n *\n * Created on: 27.02.2014\n * Author: cls\n */\n\n#ifndef COMMUNITYGTEST_H_\n#define COMMUNITYGTEST_H_\n\n#include <gtest/gtest.h>\n\nnamespace NetworKit {\n\nclass CommunityGTest: public testing::Test {\n};\n\n} /* namespace NetworKit */\n\n#endif /* COMMUNITYGTEST_H_ */\n" }, { "alpha_fraction": 0.5031298995018005, "alphanum_fraction": 0.569640040397644, "avg_line_length": 23.576923370361328, "blob_id": "54397337fb9947f567cf603e1b417e1c5e2a00f1", "content_id": "8a725bbf6ef18c2719245bdfde82b7cde667f85e", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1278, "license_type": "permissive", "max_line_length": 50, "num_lines": 52, "path": "/networkit/cpp/numerics/test/GaussSeidelRelaxationGTest.cpp", "repo_name": "networkitproject/networkit-mirror", "src_encoding": "UTF-8", "text": "///*\n// * GaussSeidelRelaxationGTest.cpp\n// *\n// * Created on: 03.11.2014\n// * Author: Michael\n// */\n//\n//#include \"GaussSeidelRelaxationGTest.h\"\n//\n//namespace NetworKit {\n//\n//TEST(GaussSeidelRelaxationGTest, trySolve) {\n//\tstd::vector<Vector> rows;\n//\trows.push_back({10, -1, 2, 0});\n//\trows.push_back({-1, 11, -1, 3});\n//\trows.push_back({2, -1, 10, -1});\n//\trows.push_back({0, 3, -1, 8});\n//\tMatrix A(rows);\n//\n//\tVector b = {6, 25, -11, 15};\n//\tVector x = {0, 0, 0, 0};\n//\n//\tGaussSeidelRelaxation solver;\n//\tVector result = solver.relax(A, b, x);\n//\n//\tEXPECT_EQ(1, std::round(result[0]));\n//\tEXPECT_EQ(2, std::round(result[1]));\n//\tEXPECT_EQ(-1, std::round(result[2]));\n//\tEXPECT_EQ(1, std::round(result[3]));\n//}\n//\n//TEST(GaussSeidelRelaxationGTest, tryIteration) {\n//\tstd::vector<Vector> rows;\n//\trows.push_back({10, -1, 2, 0});\n//\trows.push_back({-1, 11, -1, 3});\n//\trows.push_back({2, -1, 10, -1});\n//\trows.push_back({0, 3, -1, 8});\n//\tMatrix A(rows);\n//\n//\tVector b = {6, 25, -11, 15};\n//\tVector x = {0, 0, 0, 0};\n//\n//\tGaussSeidelRelaxation solver;\n//\tVector result = solver.relax(A, b, x, 1);\n//\n//\tEXPECT_TRUE(result[0] > 0);\n//\tEXPECT_TRUE(result[1] > 1);\n//\tEXPECT_TRUE(result[2] < 0);\n//\tEXPECT_TRUE(result[3] > 0);\n//}\n//\n//} /* namespace NetworKit */\n" }, { "alpha_fraction": 0.710792601108551, "alphanum_fraction": 0.7242832779884338, "avg_line_length": 23.204082489013672, "blob_id": "2c62424721cb569407429f2ab55acff2d30f8558", "content_id": "a6e7d1dfc224afc5db2a755fd5b8ae00c71994cd", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, 
"language": "C++", "length_bytes": 1186, "license_type": "permissive", "max_line_length": 101, "num_lines": 49, "path": "/networkit/cpp/centrality/ApproxBetweenness2.h", "repo_name": "networkitproject/networkit-mirror", "src_encoding": "UTF-8", "text": "/*\n * ApproxBetweenness2.h\n *\n * Created on: 13.06.2014\n * Author: Christian Staudt, Elisabetta Bergamini\n */\n\n#ifndef APPROXBETWEENNESS2_H_\n#define APPROXBETWEENNESS2_H_\n\n#include \"Centrality.h\"\n\n\nnamespace NetworKit {\n\n/**\n * @ingroup centrality\n * Approximation of betweenness centrality according to algorithm described in\n * Sanders, Geisberger, Schultes: Better Approximation of Betweenness Centrality\n */\nclass ApproxBetweenness2: public NetworKit::Centrality {\n\npublic:\n\n\t/**\n\t * The algorithm approximates the betweenness of all nodes, using weighting\n\t * of the contributions to avoid biased estimation. The run() method takes O(m)\n\t * time per sample, where m is the number of edges of the graph.\n\t *\n\t * @param\tgraph\t\tinput graph\n\t * @param\tnSamples\t user defined number of samples\n\t * @param\tnormalized normalize centrality values in interval [0,1] ?\n\t * @param\tparallel_flag\tif true, run in parallel with additional memory cost z + 3z * t\n\t */\n\tApproxBetweenness2(const Graph& G, count nSamples, bool normalized=false, bool parallel_flag=false);\n\n\tvoid run() override;\n\n\nprivate:\n\n\tcount nSamples;\n\tbool parallel_flag;\n\n};\n\n} /* namespace NetworKit */\n\n#endif /* APPROXBETWEENNESS_H_ */\n" }, { "alpha_fraction": 0.6455696225166321, "alphanum_fraction": 0.6607595086097717, "avg_line_length": 22.939393997192383, "blob_id": "4bc2070ea23a14f4d575fb33061c7982d7c09299", "content_id": "13bde0f77762f7ea3ded85a9b5419513b6514eab", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 790, "license_type": "permissive", "max_line_length": 109, "num_lines": 33, "path": "/networkit/cpp/algebraic/NormalizedLaplacianMatrix.cpp", "repo_name": "networkitproject/networkit-mirror", "src_encoding": "UTF-8", "text": "/*\n * NormalizedLaplacianMatrix.cpp\n *\n * Created on: 20.03.2014\n * Author: Michael Wegner ([email protected])\n */\n\n#include \"NormalizedLaplacianMatrix.h\"\n\nnamespace NetworKit {\n\nNormalizedLaplacianMatrix::NormalizedLaplacianMatrix(const Graph &graph) : Matrix(graph.upperNodeIdBound()) {\n\tgraph.forNodes([&](const node i){\n\t\tdouble weightedDegree = graph.weightedDegree(i);\n\n\t\tgraph.forNeighborsOf(i, [&](const node j, double weight){\n\t\t\tif (i != j) {\n\t\t\t\tdouble weightedNeighborDegree = graph.weightedDegree(j);\n\t\t\t\tsetValue(i, j, -weight/sqrt(weightedDegree * weightedNeighborDegree));\n\t\t\t}\n\t\t});\n\n\t\tif (weightedDegree != 0.0) {\n\t\t\tif (graph.isWeighted()) {\n\t\t\t\tsetValue(i, i, 1-(graph.weight(i, i)) / weightedDegree);\n\t\t\t} else {\n\t\t\t\tsetValue(i, i, 1);\n\t\t\t}\n\t\t}\n\t});\n}\n\n} /* namespace NetworKit */\n" }, { "alpha_fraction": 0.7300636172294617, "alphanum_fraction": 0.743416965007782, "avg_line_length": 43.89075469970703, "blob_id": "b1ba7ed5619e7511f149fceaff2bc72cfa3f9fee", "content_id": "29ad1d0c9753f1e6348b68a119a7fee55a476f73", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 16034, "license_type": "permissive", "max_line_length": 496, "num_lines": 357, "path": "/Doc/doc/news.rst", "repo_name": "networkitproject/networkit-mirror", "src_encoding": "UTF-8", "text": ".. 
|br| raw:: html\n\n    <br />\n\n.. role:: hidden\n    :class: hidden\n\n====\nNews\n====\n\n.. just ignore the following header. This is a hack to make the other headings created with ~ smaller.\n\n:hidden:`HiddenBiggerHeadingFont`\n---------------------------------\n\nJuly 05, 2016: **NetworKit 4.1.1 released**\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\nThis is more of a maintenance release: it fixes the pip package, and building with clang is possible again (at least with version 3.8). \n\nNote: You can control which C++ compiler the setup.py of the networkit package is supposed to use with e.g. :code:`CXX=clang++ pip install networkit`. This may be helpful when the setup fails to detect the compiler.\n\n\nJune 23, 2016: **NetworKit 4.1 released**\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\nToday we announce the next version of NetworKit, the open-source toolkit for large-scale network analysis.\nNetworKit is a Python package, with performance-critical algorithms implemented in C++/OpenMP.\n\n**Release notes**\n\nMajor:\n\nnew website\n\nC++ implementation of Lean Algebraic Multigrid (LAMG) by Livne et al.\nfor solving large Laplacian systems serves as backend for various\nnetwork analysis kernels\n\ncentrality module\n\n- centrality.TopCloseness: Implementation of a new algorithm for\n  finding the top-k nodes with highest closeness centrality faster than\n  computing it for all nodes (E. Bergamini, M. Borassi, P. Crescenzi,\n  A. Marino, H. Meyerhenke, \"Computing Top-k Closeness Centrality\n  Faster in Unweighted Graphs\", ALENEX'16)\n\ngenerator module:\n\n- generator.HyperbolicGenerator: a fast parallel generator for complex\n  networks based on hyperbolic geometry (Looz, Meyerhenke, Prutkin '15:\n  Random Hyperbolic Graphs in Subquadratic Time)\n\n|  \n\n   \nMinor:\n\nre-introduced an overview(G)-function that collects and prints some\ninformation about a graph\n\nupdated documentation\n\nsome IO bugfixes\n\ngraph module:\n\n- Subgraph class has been removed, its functionality is now in\n  Graph::subgraphFromNodes(...)\n\ngenerator module: \n\n- Many graph generators now provide fit(G) method that returns an\n  instance of the generator such that generated graphs are similar to\n  the provided one\n- Improved performance of the BarabasiAlbert generator by implementing\n  Batagelj's method\n\ndistance module:\n\n- distance.CommuteTimeDistance: a node distance measure; distance is\n  low when there are many short paths connecting two nodes\n- Adapted Diameter class to Algorithm convention; diameter algorithm\n  can be chosen via enum in the constructor\n- Adapted EffectiveDiameter class to Algorithm convention resulting in\n  the classes ApproxEffectiveDiameter, ApproxHopPlot,\n  ApproxNeighborhoodFunction; added exact computation of the\n  Neighborhood Function\n\ncentrality module:\n\n- centrality.SpanningEdgeCentrality: edge centrality measure\n  representing the fraction of spanning trees containing the edge\n- centrality.ApproxCloseness: new algorithm for approximating closeness\n  centrality based on \"Computing Classic Closeness Centrality, at\n  Scale\", Cohen et al.\n\n|\n\n\n\n\nMay 9, 2016: **NetworKit journal paper accepted at Network Science**\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\nOur paper describing NetworKit as a toolkit for large-scale complex network analysis has been accepted by the Cambridge University Press journal Network Science. 
|br| |br|\n\n\n\nApr 12, 2016: **Publication accepted at SNAM**\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\nOur paper on sparsification methods for social networks with NetworKit (authors: Lindner, Staudt, Hamann, Meyerhenke, Wagner) has been accepted for publication in Social Network Analysis and Mining. |br| |br|\n\n\n\nApr 12, 2016: **Publication accepted at Internet Mathematics**\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\nOur paper on approximating betweenness centrality in dynamic networks with NetworKit (authors: Bergamini, Meyerhenke) has been accepted for publication in Internet Mathematics. |br| |br|\n\n\n\nNov 16, 2015: **Publication accepted at ALENEX16**\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\nOur paper on finding the top-k nodes with highest closeness centrality with NetworKit (authors: Bergamini, Borassi, Crescenzi, Marino, Meyerhenke) has been accepted at the 18th Meeting on Algorithm Engineering and Experiments, ALENEX 2016. |br| |br|\n\n\n\nNov 10, 2015: **NetworKit 4.0 released**\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\nWe have just released NetworKit 4.0. Apart from several improvements to algorithms and architecture, the main feature of this release is a new front end for exploratory network analysis, which is described here:\n\nhttp://nbviewer.ipython.org/urls/networkit.iti.kit.edu/data/uploads/docs/Profiling.ipynb\n\nThe new version is now available from the Python Package Index. Try upgrading with\n:code:`pip3 install --upgrade networkit` |br| |br|\n\n\nAug 19, 2015: **NetworKit 3.6 released**\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\nWe have released version 3.6 today. Thank you to all contributors. Here are the release notes.\n\n*Release Notes*\n\nMajor Updates:\n\nLink Prediction\n\nLink prediction methods try to predict the likelihood of a future or missing connection between two nodes in a given network. The new module networkit.linkprediction contains various methods from the literature.\n\nEdge Sparsification\n\nSparsification reduces the size of networks while preserving structural and statistical properties of interest. The module networkit.sparsification provides methods for rating edges by importance and then filtering globally by these scores. The methods are described in http://arxiv.org/abs/1505.00564\n\n\nFurther Updates:\n\n- Improved support for directed graphs in analysis algorithms\n- Improved support for the Intel compiler\n- Reader/writer for the GEXF (Gephi) graph file format\n- EdgeListReader now reads edge lists with arbitrary node ids (e.g. strings) when continuous=False; getNodeMap() returns a mapping from file node ids to graph node ids\n- EdgeListReader/Writer now add weights when reading files/writing graphs to file. |br| |br|\n\n\nJun 16, 2015: **Publication accepted at ESA15**\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\nOur paper on the approximation of betweenness centrality in fully-dynamic networks with NetworKit (authors: Bergamini, Meyerhenke) has been accepted at the 23rd European Symposium on Algorithms, ESA 2015. |br| |br|\n\n\nJun 9, 2015: **NetworKit 3.5 released**\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\nWe released NetworKit 3.5 a couple of days ago. Please upgrade to the latest version to receive a number of improvements. 
We also appreciate feedback on the new release.\n\n*Release Notes*\n\nThis release focused on bugfixes, under-the-hood improvements and refactoring.\n\n- Various bugfixes and stability improvements\n- Abort signal handling: developed mechanism to interrupt long-running algorithms via the ctrl+C command -- already supported in community.PLM, centrality.Betweenness, centrality.ApproxBetweenness, centrality.ApproxBetweenness2, centrality.PageRank\n- Efficient node and edge iteration on the Python layer: G.forEdges, G.forNodes...\n- Constant-time check if a graph has self-loops: Graph.hasSelfLoops()\n- networkit.setSeed: set a fixed seed for the random number generator\n- Refactoring: CoreDecomposition and LocalClusteringCoefficient now in centrality module\n- Refactoring: introduced Python/Cython base classes: Centrality, CommunityDetector\n- Removed: CNM community detection algorithm\n- The GIL (Global Interpreter Lock) is released for many algorithms in order to make it possible to execute multiple computations in parallel in a single Python process.\n- Improved support for directed graphs in many algorithms |br| |br|\n\n\nDec 4, 2014: **NetworKit 3.4 released**\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\nToday we have released version 3.4 of NetworKit, the open-source toolkit for high-performance network analysis. This release brings numerous critical bugfixes as well as useful incremental features and performance optimizations. We are also moving towards consistent interfaces for algorithms. We have also further simplified the installation dependencies.\n\nThank you to the numerous people who have contributed code to this release.\n\nMore information can be found on https://networkit.iti.kit.edu/. We welcome user feedback and opportunities for collaboration.\n\nRelease Notes\n\nFeatures\n\n* graph\n    * Graph can be copied on Python level\n    * spanning tree/forest (graph.SpanningForest)\n* algorithms in general\n    * Edmonds-Karp max flow algorithm (flow.EdmondsKarp)\n    * core decomposition works for directed graphs (properties.CoreDecomposition)\n    * algebraic distance, a structural distance measure in graphs (distance.AlgebraicDistance)\n* IO\n    * there is no longer a default graph file format\n    * read and write the GML graph file format (graphio.GMLGraphReader/Writer)\n    * conversion of directed to undirected graph (Graph.toUndirected)\n    * reader and writer for the GraphTool binary graph format (graphio.GraphToolBinaryReader)\n    * METIS graph reader supports arbitrary edge weights (graphio.METISGraphReader)\n* algebraic\n    * algebraic backend supports rectangular matrices (Matrix.h)\n* community detection\n    * turbo mode for PLM community detection algorithm gives a factor 2 speedup at the cost of more memory (community.PLM)\n    * Cut Clustering community detection algorithm (community.CutClustering)\n* generators\n    * Erdös-Renyi generator can generate directed graphs (generators.ErdosRenyiGenerator)\n    * configuration model graph generator for generating a random simple graph with exactly the given degree sequence (generators.ConfigurationModelGenerator)\n    * generator for power law degree sequences (generators.PowerlawDegreeSequence)\n\nBugfixes\n\n* GraphMLReader improved (graphio.GraphMLReader)\n* ConnectedComponents usability improved\n* KONECT reader (graphio.KONECTGraphReader)\n* fixed build problem on case-insensitive file systems\n* closed memory leaks by adding missing destructors on the Cython layer\n* improved memory management by adding missing move constructors\n* DynamicForestFireGenerator 
fixed\n\nRefactoring\n\n* standardization of analysis algorithm interface: parameters given by constructor, computation triggered in run method, results retrieved via getter methods\n* run methods return self to allow chaining\n* introducing unit tests on the Python layer\n\nBuild and Installation\n\n* pip installation no longer requires Cython\n* pip installation no longer requires SCons; a minimal build system serves as fallback if SCons is missing |br| |br|\n\n\n\nOct 21, 2014: **Publication accepted at ALENEX15**\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\nOur paper on approximating betweenness centrality in dynamic networks with NetworKit (authors: Bergamini, Meyerhenke, Staudt) has been accepted at the 17th Meeting on Algorithm Engineering and Experiments, ALENEX 2015. |br| |br|\n\n\n\nSep 28, 2014: **NetworKit presented at summer school tutorial on network analysis**\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\nIn a joint tutorial on Algorithmic methods for network analysis with Dorothea Wagner for the summer school of the DFG priority programme Algorithm Engineering, Henning Meyerhenke introduced NetworKit to the participants. The PhD students from Germany and other European countries successfully solved various network analysis tasks with NetworKit during the tutorial. |br| |br|\n\n\n\nSep 28, 2014: **Publication accepted**\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\nOur paper on selective community detection with NetworKit (authors: Staudt, Marrakchi, Meyerhenke) has been accepted at the First International Workshop on High Performance Big Graph Data Management, Analysis, and Mining (in Conjunction with IEEE BigData'14). |br| |br|\n\n\n\nAug 22, 2014: **NetworKit 3.3 released**\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\nNetworKit 3.3 has been released, including the following improvements to our network analysis framework:\n\n- renamed package to \"networkit\" according to Python packaging convention\n- restructured package to enable \"pip install networkit\"\n- improved community detection algorithms\n- improved diameter algorithms\n- added support for efficient, arbitrary edge attributes via edge indexing\n- Eigenvector Centrality & PageRank on the basis of scipy\n- spectral methods for graph partitioning (partitioning.SpectralPartitioner), drawing (viztools.layout.SpectralLayout) and coloring (coloring.SpectralColoring)\n- new graph generators: stochastic blockmodel (generators.StochasticBlockmodel), Watts-Strogatz model (generators.WattsStrogatzGenerator) and Forest Fire model (generators.DynamicForestFireGenerator)\n- union find data structure (structures/UnionFind)\n- simple spanning forest algorithm (graph.SpanningForest)\n- fast algorithm for partition intersection (community/PartitionIntersection)\n- hub dominance in communities (community.HubDominance)\n- reader for Matlab adjacency matrices\n- support for reading and writing Covers\n- performance improvements in Gephi streaming interface |br| |br|\n\n\n\nJul 1, 2014: **NetworKit 3.2 released**\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\nNetworKit 3.2 has been released, including major improvements to our network analysis framework:\n\n*Critical Bugfixes*\n\n- graph data structure supports directed graphs\n- optimized connected components algorithm (properties.ParallelConnectedComponents)\n- faster heuristic algorithm for approximating betweenness centrality (centrality.ApproxBetweenness2)\n- Gephi support: export of node attributes, Gephi streaming plugin support\n- graph 
generators: Dorogovtsev-Mendes model\n- improved portability (Windows)\n- overhaul of graph file input |br| |br|\n\n\n\nMay 15, 2014: **New website online**\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\nNetworKit, our tool suite for high-performance network analysis, has its own website now! |br| |br|\n\n\n\nApr 25, 2014: **Introductory talk**\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\nChristian Staudt gave an introductory talk about the current release of NetworKit. The slides and a video of the talk are available on the Documentation page. |br| |br|\n\n\n\nApr 15, 2014: **NetworKit 3.1 released**\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\nVersion 3.1 is an incremental update to our tool suite for high-performance network analysis. Improvements and new features include Eigenvector centrality, PageRank, Betweenness centrality approximation, R-MAT graph generator, BFS/DFS iterators, improved BFS and Dijkstra classes, and improved memory footprint when using large objects on the Python level. More detailed information can be found in the accompanying publication. |br| |br|\n\n\n\nMar 13, 2014: **NetworKit 3.0 released**\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\nNetworKit 3.0 is the next major release of our open-source tookit for high-performance network analysis. Since the last release in November, NetworKit has received several improvements under the hood as well as an extension of the feature set. What started as a testbed for parallel community detection algorithms has evolved into a diverse set of tools that make it easy to characterize complex networks. This has been successfully scaled to large data sets with up to several billions of edges.\n\nThis being an open-source project, we are very interested in incorporating feedback from data analysts and algorithm engineers. Feel free to contact us with any question on how NetworKit could be applied in your field of research. |br| |br|\n\n\n\nNov 11, 2013: **NetworKit 2.0 released**\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\nSecond major release of NetworKit. The toolkit has been improved by adding several graph algorithms and an interactive shell based on Python/Cython. We begin a more frequent release cycle. |br| |br|\n\n\n\nMar 17, 2013: **NetworKit 1.0 released**\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\nInitial release of the community detection component. With this release of NetworKit, we would like to encourage reproduction of our results, reuse of code and contributions by the community. 
|br| |br|\n" }, { "alpha_fraction": 0.6786389350891113, "alphanum_fraction": 0.6937618255615234, "avg_line_length": 15.53125, "blob_id": "4be9767fb9b70ca3e3d84db5c66b87841b3de422", "content_id": "796a5120f30b28bd9a2f59682973ca3d4f78e80b", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 529, "license_type": "permissive", "max_line_length": 51, "num_lines": 32, "path": "/networkit/cpp/algebraic/test/LaplacianMatrixGTest.h", "repo_name": "networkitproject/networkit-mirror", "src_encoding": "UTF-8", "text": "/*\n * LaplacianMatrixGTest.h\n *\n * Created on: 25.03.2014\n * Author: Michael Wegner ([email protected])\n */\n\n#ifndef NOGTEST\n\n#ifndef LAPLACIANMATRIXGTEST_H_\n#define LAPLACIANMATRIXGTEST_H_\n\n#include \"gtest/gtest.h\"\n#include \"../LaplacianMatrix.h\"\n#include \"../../graph/Graph.h\"\n#include \"../../io/METISGraphReader.h\"\n\n\nnamespace NetworKit {\n\nclass LaplacianMatrixGTest : public testing::Test {\npublic:\n\tLaplacianMatrixGTest();\n\tvirtual ~LaplacianMatrixGTest();\n};\n\n\n} /* namespace NetworKit */\n\n#endif /* LAPLACIANMATRIXGTEST_H_ */\n\n#endif\n" }, { "alpha_fraction": 0.6472684144973755, "alphanum_fraction": 0.6579572558403015, "avg_line_length": 22.38888931274414, "blob_id": "0aeec622a39b6ef2c10006edcae42e490764bf47", "content_id": "f69ded813d8e479c1bb10d4cdfec2de7a222e237", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 842, "license_type": "permissive", "max_line_length": 91, "num_lines": 36, "path": "/networkit/cpp/generators/ChungLuGenerator.cpp", "repo_name": "networkitproject/networkit-mirror", "src_encoding": "UTF-8", "text": "/*\n * ChungLu.cpp\n *\n * Created on: Dec 23, 2013\n * Author: Henning\n * Contributors: Hoske/Weisbarth\n */\n\n#include <numeric>\n\n#include \"ChungLuGenerator.h\"\n#include \"../graph/GraphBuilder.h\"\n\nnamespace NetworKit {\n\nChungLuGenerator::ChungLuGenerator(const std::vector< NetworKit::count > &degreeSequence) :\n\t\tStaticDegreeSequenceGenerator(degreeSequence) {\n\tsum_deg = std::accumulate(seq.begin(), seq.end(), 0);\n\tn = (count) seq.size();\n}\n\nGraph ChungLuGenerator::generate() {\n\tGraphBuilder gB(n);\n\n\tgB.parallelForNodePairs([&](node u, node v) {\n\t\t/* Random number in [0, 1] */\n\t\tdouble randVal = Aux::Random::probability();\n\t\t/* Probability of edge (u, v): d(u)*d(v)/sum_deg */\n\t\tif (randVal < double(seq[u] * seq[v]) / sum_deg) {\n\t\t\tgB.addHalfOutEdge(u, v);\n\t\t}\n\t});\n\treturn gB.toGraph(true,true);\n}\n\n} /* namespace NetworKit */\n" }, { "alpha_fraction": 0.6626190543174744, "alphanum_fraction": 0.6692857146263123, "avg_line_length": 20.76165771484375, "blob_id": "cebb3c7975c6a909226ffb40e74a4dbd96afcf0a", "content_id": "f49f446e2907eda8008fb5a9091cfa7cebbffeae", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 4200, "license_type": "permissive", "max_line_length": 80, "num_lines": 193, "path": "/networkit/cpp/auxiliary/PrioQueue.h", "repo_name": "networkitproject/networkit-mirror", "src_encoding": "UTF-8", "text": "/*\n * PrioQueue.h\n *\n * Created on: 21.02.2014\n * Author: Henning\n */\n\n#ifndef PRIOQUEUE_H_\n#define PRIOQUEUE_H_\n\n#include <cassert>\n#include <set>\n#include <vector>\n#include <limits>\n#include <iostream>\n\n#include \"../auxiliary/Log.h\"\n\nnamespace Aux {\n\n/**\n * Priority queue with extract-min and decrease-key.\n * The type Val takes on integer values between 0 and 
n-1.\n * O(n log n) for construction, O(log n) for typical operations.\n */\ntemplate<class Key, class Val>\nclass PrioQueue {\n\ttypedef std::pair<Key, Val> ElemType;\n\nprivate:\n\tstd::set<ElemType> pqset;\n\tstd::vector<Key> mapValToKey;\n\n\tconst Key undefined = std::numeric_limits<Key>::max(); // TODO: make static\n\npublic:\n\t/**\n\t * Builds priority queue from the vector @a elems.\n\t */\n\tPrioQueue(const std::vector<ElemType>& elems);\n\n\t/**\n\t * Builds priority queue from the vector @a keys, values are indices\n\t * in @a keys.\n\t */\n\tPrioQueue(std::vector<Key>& keys);\n\n\t/**\n\t* Builds priority queue of the specified size @a len.\n\t*/\n\tPrioQueue(uint64_t len);\n\n\n\tvirtual ~PrioQueue() = default;\n\n\t/**\n\t * Inserts key-value pair stored in @a elem.\n\t */\n\tvirtual void insert(Key key, Val value);\n\n\t/**\n\t * Removes the element with minimum key and returns it.\n\t */\n\tvirtual ElemType extractMin();\n\n\t/**\n\t * Modifies entry with value @a value.\n\t * The entry is then set to @a newKey with the same value.\n\t * If the corresponding key is not present, the element will be inserted.\n\t */\n\tvirtual void decreaseKey(Key newKey, Val value);\n\n\t/**\n\t * Removes key-value pair given by @a elem.\n\t */\n\tvirtual void remove(const ElemType& elem);\n\n\t/**\n\t * Removes key-value pair given by value @a val.\n\t */\n\tvirtual void remove(const Val& val);\n\n\t/**\n\t * @return Number of elements in PQ.\n\t */\n\tvirtual uint64_t size() const;\n\n\n\t/**\n\t * @return current content of queue\n\t */\n\tvirtual std::set<std::pair<Key, Val>> content() const;\n\n\t/**\n\t * Removes all elements from the PQ.\n\t */\n\tvirtual void clear();\n\n\t/**\n\t * DEBUGGING\n\t */\n\tvirtual void print() {\n\t\tDEBUG(\"num entries: \", mapValToKey.size());\n\t\tfor (uint64_t i = 0; i < mapValToKey.size(); ++i) {\n\t\t\tDEBUG(\"key: \", mapValToKey[i], \", val: \", i, \"\\n\");\n\t\t}\n\t}\n};\n\n} /* namespace Aux */\n\ntemplate<class Key, class Val>\nAux::PrioQueue<Key, Val>::PrioQueue(const std::vector<ElemType>& elems) {\n\tmapValToKey.resize(elems.size());\n\tfor (auto elem: elems) {\n\t\tinsert(elem.first, elem.second);\n\t}\n}\n\ntemplate<class Key, class Val>\nAux::PrioQueue<Key, Val>::PrioQueue(std::vector<Key>& keys) {\n\tmapValToKey.resize(keys.size());\n\tuint64_t index = 0;\n\tfor (auto key: keys) {\n\t\tinsert(key, index);\n\t\t++index;\n\t}\n}\n\ntemplate<class Key, class Val>\nAux::PrioQueue<Key, Val>::PrioQueue(uint64_t len) {\n\tmapValToKey.resize(len);\n}\n\ntemplate<class Key, class Val>\ninline void Aux::PrioQueue<Key, Val>::insert(Key key, Val value) {\n\tif (value >= mapValToKey.size()) {\n\t\tuint64_t doubledSize = 2 * mapValToKey.size();\n\t\tassert(value < doubledSize);\n\t\tmapValToKey.resize(doubledSize);\n\t}\n\tpqset.insert(std::make_pair(key, value));\n\tmapValToKey.at(value) = key;\n}\n\ntemplate<class Key, class Val>\ninline void Aux::PrioQueue<Key, Val>::remove(const ElemType& elem) {\n\tremove(elem.second);\n}\n\ntemplate<class Key, class Val>\ninline void Aux::PrioQueue<Key, Val>::remove(const Val& val) {\n\tKey key = mapValToKey.at(val);\n//\tDEBUG(\"key: \", key);\n\tpqset.erase(std::make_pair(key, val));\n\tmapValToKey.at(val) = undefined;\n}\n\ntemplate<class Key, class Val>\nstd::pair<Key, Val> Aux::PrioQueue<Key, Val>::extractMin() {\n\tassert(pqset.size() > 0);\n\tElemType elem = (* pqset.begin());\n\tremove(elem);\n\treturn elem;\n}\n\ntemplate<class Key, class Val>\ninline void Aux::PrioQueue<Key, Val>::decreaseKey(Key 
newKey, Val value) {\n\t// find and remove element with given key\n\tremove(value);\n\n\t// insert element with new value\n\tinsert(newKey, value);\n}\n\ntemplate<class Key, class Val>\ninline uint64_t Aux::PrioQueue<Key, Val>::size() const {\n\treturn pqset.size();\n}\n\ntemplate<class Key, class Val>\ninline std::set<std::pair<Key, Val>> Aux::PrioQueue<Key, Val>::content() const {\n\treturn pqset;\n}\n\ntemplate<class Key, class Val>\ninline void Aux::PrioQueue<Key, Val>::clear() {\n\tpqset.clear();\n\tmapValToKey.clear();\n}\n\n\n#endif /* PRIOQUEUE_H_ */\n" }, { "alpha_fraction": 0.6297376155853271, "alphanum_fraction": 0.6530612111091614, "avg_line_length": 11.703703880310059, "blob_id": "774015d65f8a30e7bc7205886eb25154557d1436", "content_id": "1ce6b9c3a3cabaa11dddac9e0ba8fbb002687ef5", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 343, "license_type": "permissive", "max_line_length": 44, "num_lines": 27, "path": "/networkit/cpp/structures/test/UnionFindGTest.h", "repo_name": "networkitproject/networkit-mirror", "src_encoding": "UTF-8", "text": "/*\n * UnionFindGTest.h\n *\n * Created on: 04.12.2013\n * Author: Maximilian Vogel ([email protected])\n */\n\n#ifndef NOGTEST\n\n#ifndef UNIONFINDGTEST_H_\n#define UNIONFINDGTEST_H_\n\n#include <gtest/gtest.h>\n\nnamespace NetworKit {\n\nclass UnionFindGTest: public testing::Test {\n\n};\n\n\n\n\n} /* namespace NetworKit */\n#endif /* UNIONFINDGTEST_H_ */\n\n#endif /*NOGTEST */\n" }, { "alpha_fraction": 0.6939111351966858, "alphanum_fraction": 0.7043335437774658, "avg_line_length": 20.702381134033203, "blob_id": "56f3a4be410897ada9f1d5dabe1199505ea1e0a4", "content_id": "0cbf792448ed25fa79076bd13b828ebe09fb8868", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1823, "license_type": "permissive", "max_line_length": 156, "num_lines": 84, "path": "/networkit/cpp/centrality/SpanningEdgeCentrality.h", "repo_name": "networkitproject/networkit-mirror", "src_encoding": "UTF-8", "text": "/*\n * SpanningEdgeCentrality.h\n *\n * Created on: 29.07.2015\n * Author: henningm\n */\n\n#ifndef SPANNING_H_\n#define SPANNING_H_\n\n#include \"Centrality.h\"\n#include \"../numerics/LAMG/Lamg.h\"\n\n\nnamespace NetworKit {\n\n/**\n * @ingroup centrality\n *\n * SpanningEdgeCentrality edge centrality.\n *\n */\nclass SpanningEdgeCentrality: public NetworKit::Centrality {\nprotected:\n\tdouble tol;\n\tLamg lamg;\n\tuint64_t setupTime;\n\npublic:\n\t/**\n\t * Constructs the SpanningEdgeCentrality class for the given Graph @a G.\n\t * @param G The graph.\n\t * @param tol constant used for the approximation: with probability at least 1-1/n, the approximated scores are within a factor 1+tol from the exact scores\n\t */\n\tSpanningEdgeCentrality(const Graph& G, double tol = 0.1);\n\n\t/**\n\t * Destructor.\n\t */\n\tvirtual ~SpanningEdgeCentrality() = default;\n\n\n\t/**\n\t* Compute spanning edge centrality scores exactly for all edges.\n\t*/\n\tvoid run() override;\n\n\n\t/**\n\t * Compute approximation by JL projection.\n\t */\n\tvoid runApproximation();\n\n\t/**\n\t * Compute approximation by JL projection in parallel.\n\t */\n\tvoid runParallelApproximation();\n\n\t/**\n\t * Only used by benchmarking. 
Computes an approximation by projection and solving Laplacian systems.\n\t * Measures the time needed to compute the approximation and writes the problem vectors to the\n\t * directory of the graph specified by @a graphPath.\n\t * @param directory\n\t * @return Elapsed time in milliseconds.\n\t */\n\tuint64_t runApproximationAndWriteVectors(const std::string &graphPath);\n\n\t/**\n\t * @return The elapsed time to setup the solver in milliseconds.\n\t */\n\tuint64_t getSetupTime() const;\n\t/**\n\t * Compute value for one edge only.\n\t * @param[in] u Endpoint of edge.\n\t * @param[in] v Endpoint of edge.\n\t */\n\tdouble runForEdge(node u, node v);\n\n};\n\n} /* namespace NetworKit */\n\n\n#endif /* SPANNING_H_ */\n" }, { "alpha_fraction": 0.753398060798645, "alphanum_fraction": 0.753398060798645, "avg_line_length": 18.80769157409668, "blob_id": "a7b0f4f0c53444305de936f58a2127458e62dd44", "content_id": "72a7590defcf0deab5d7e294cb34a6a5d7f4d416", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 515, "license_type": "permissive", "max_line_length": 58, "num_lines": 26, "path": "/networkit/cpp/centrality/PermanenceCentrality.h", "repo_name": "networkitproject/networkit-mirror", "src_encoding": "UTF-8", "text": "#ifndef PERMANENCECENTRALITY_H\n#define PERMANENCECENTRALITY_H\n\n#include \"Centrality.h\"\n#include \"../structures/Partition.h\"\n\nnamespace NetworKit {\n\nclass PermanenceCentrality : public Algorithm {\npublic:\n\tPermanenceCentrality(const Graph &G, const Partition &P);\n\tvoid run();\n\tdouble getPermanence(node u);\n\tdouble getIntraClustering(node u);\nprivate:\n\tconst Graph &G;\n\tconst Partition &P;\n\tstd::vector<index> inBegin;\n\tstd::vector<node> inEdges;\n\tstd::vector<bool> marker;\n};\n\n\n}\n\n#endif // PERMANENCECENTRALITY_H\n" }, { "alpha_fraction": 0.6475903391838074, "alphanum_fraction": 0.6656626462936401, "avg_line_length": 12.833333015441895, "blob_id": "812a398ed57c142fb86b8e30108b05bb850e9368", "content_id": "1929844001706d72a166e53b3845258e49c4ef3b", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 332, "license_type": "permissive", "max_line_length": 43, "num_lines": 24, "path": "/networkit/cpp/distance/test/DistanceGTest.h", "repo_name": "networkitproject/networkit-mirror", "src_encoding": "UTF-8", "text": "/*\n * DistanceGTest.h\n *\n * Created on: Sep 04, 2015\n * Author: maxv\n */\n\n#ifndef NOGTEST\n\n#ifndef DISTANCEGTEST_H_\n#define DISTANCEGTEST_H_\n\n#include <gtest/gtest.h>\n\nnamespace NetworKit {\n\nclass DistanceGTest: public testing::Test {\npublic:\n};\n\n} /* namespace NetworKit */\n#endif /* DISTANCEGTEST_H_ */\n\n#endif /*NOGTEST */\n" }, { "alpha_fraction": 0.6898661851882935, "alphanum_fraction": 0.6914318203926086, "avg_line_length": 25.613636016845703, "blob_id": "06aa1e7e19f9b6ed84779e0d5acdab121b64c535", "content_id": "b369817dcc9cedf327f99b23848d701f20ed3e2f", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 7026, "license_type": "permissive", "max_line_length": 136, "num_lines": 264, "path": "/networkit/cpp/algebraic/Matrix.h", "repo_name": "networkitproject/networkit-mirror", "src_encoding": "UTF-8", "text": "/*\n * Matrix.h\n *\n * Created on: 13.03.2014\n * Author: Michael Wegner ([email protected])\n */\n\n#ifndef MATRIX_H_\n#define MATRIX_H_\n\n#include \"../graph/Graph.h\"\n#include \"Vector.h\"\n#include \"SparseAccumulator.h\"\n\nnamespace 
NetworKit {\n\n/**\n * @ingroup algebraic\n * The matrix class represents a matrix which is optimized for sparse matrices.\n */\nclass Matrix {\nprotected:\n\tGraph graph;\n\n\tcount nRows;\n\tcount nCols;\n\npublic:\n\t/** Default constructor */\n\tMatrix();\n\n\t/**\n\t * Constructs the Matrix with size @a dimension x @a dimension.\n\t * @param dimension Defines how many rows and columns this matrix has.\n\t */\n\tMatrix(const count dimension);\n\n\n\t/**\n\t * Constructs the Matrix with size @a nRows x @a nCols.\n\t * @param nRows Number of rows.\n\t * @param nCols Number of columns.\n\t */\n\tMatrix(const count nRows, const count nCols);\n\n\t/**\n\t * Constructs the @a dimension x @a dimension Matrix from the elements at position @a positions with values @values.\n\t * @param dimension Defines how many rows and columns this matrix has.\n\t * @param positions Defines the position (i,j) of each element specified in @a values.\n\t * @param values The values of the matrix elements.\n\t */\n\tMatrix(const count dimension, const std::vector<std::pair<index, index>> &positions, const std::vector<double> &values);\n\n\t/**\n\t * Constructs the @a nRows x @a nCols Matrix from the elements at position @a positions with values @values.\n\t * @param nRows Defines how many rows this matrix has.\n\t * @param nCols Defines how many columns this matrix has.\n\t * @param positions Defines the position (i,j) of each element specified in @a values.\n\t * @param values The values of the matrix elements.\n\t */\n\tMatrix(const count nRows, const count nCols, const std::vector<std::pair<index, index>> &positions, const std::vector<double> &values);\n\n\t/**\n\t * Constructs the Matrix with the rows in @a rows.\n\t * @param rows The rows of the matrix. All rows must have the same dimension.\n\t */\n\tMatrix(const std::vector<Vector> &rows);\n\n\t/** Default copy constructor */\n\tMatrix(const Matrix &other) = default;\n\n\t/** Default move constructor */\n\tMatrix(Matrix &&other) = default;\n\n\t/** Default destructor */\n\tvirtual ~Matrix() = default;\n\n\t/** Default move assignment operator */\n\tMatrix& operator=(Matrix &&other) = default;\n\n\t/** Default copy assignment operator */\n\tMatrix& operator=(const Matrix &other) = default;\n\n\t/**\n\t * @return Number of rows.\n\t */\n\tinline count numberOfRows() const {\n\t\treturn nRows;\n\t}\n\n\t/**\n\t * @return Number of columns.\n\t */\n\tinline count numberOfColumns() const {\n\t\treturn nCols;\n\t}\n\n\t/**\n\t * @param i The row index.\n\t * @return Number of non-zeros in row @a i.\n\t */\n\tcount nnzInRow(const index i) const;\n\n\t/**\n\t * @return Number of non-zeros in this matrix.\n\t */\n\tcount nnz() const;\n\n\t/**\n\t * @return Value at matrix position (i,j).\n\t */\n\tdouble operator()(const index i, const index j) const;\n\n\t/**\n\t * Set the matrix at position (@a i, @a j) to @a value.\n\t */\n\tvoid setValue(const index i, const index j, const double value);\n\n\t/**\n\t * @return Row @a i of this matrix as vector.\n\t */\n\tVector row(const index i) const;\n\n\t/**\n\t * @return Column @a j of this matrix as vector.\n\t */\n\tVector column(const index j) const;\n\n\t/**\n\t * @return The main diagonal of this matrix.\n\t */\n\tVector diagonal() const;\n\n\t/**\n\t * Adds this matrix to @a other and returns the result.\n\t * @return The sum of this matrix and @a other.\n\t */\n\tMatrix operator+(const Matrix &other) const;\n\n\t/**\n\t * Adds @a other to this matrix.\n\t * @return Reference to this matrix.\n\t */\n\tMatrix& 
operator+=(const Matrix &other);\n\n\t/**\n\t * Subtracts @a other from this matrix and returns the result.\n\t * @return The difference of this matrix and @a other.\n\t *\n\t */\n\tMatrix operator-(const Matrix &other) const;\n\n\t/**\n\t * Subtracts @a other from this matrix.\n\t * @return Reference to this matrix.\n\t */\n\tMatrix& operator-=(const Matrix &other);\n\n\t/**\n\t * Multiplies this matrix with a scalar specified in @a scalar and returns the result.\n\t * @return The result of multiplying this matrix with @a scalar.\n\t */\n\tMatrix operator*(const double scalar) const;\n\n\t/**\n\t * Multiplies this matrix with a scalar specified in @a scalar.\n\t * @return Reference to this matrix.\n\t */\n\tMatrix& operator*=(const double scalar);\n\n\t/**\n\t * Multiplies this matrix with @a vector and returns the result.\n\t * @return The result of multiplying this matrix with @a vector.\n\t */\n\tVector operator*(const Vector &vector) const;\n\n\t/**\n\t * Multiplies this matrix with @a other and returns the result in a new matrix.\n\t * @return The result of multiplying this matrix with @a other.\n\t */\n\tMatrix operator*(const Matrix &other) const;\n\n\t/**\n\t * Divides this matrix by a divisor specified in @a divisor and returns the result in a new matrix.\n\t * @return The result of dividing this matrix by @a divisor.\n\t */\n\tMatrix operator/(const double divisor) const;\n\n\t/**\n\t * Divides this matrix by a divisor specified in @a divisor.\n\t * @return Reference to this matrix.\n\t */\n\tMatrix& operator/=(const double divisor);\n\n\tstatic Matrix mTmMultiply(const Matrix &A, const Matrix &B);\n\n\tstatic Matrix mmTMultiply(const Matrix &A, const Matrix &B);\n\n\tstatic Vector mTvMultiply(const Matrix &matrix, const Vector &vector);\n\n\tMatrix transpose() const;\n\n\t/**\n\t * Iterate over all non-zero elements of row @a row in the matrix and call handler(index row, index column, double value)\n\t */\n\ttemplate<typename L> void forNonZeroElementsInRow(index row, L handle) const;\n\n\t/**\n\t * Iterate over all non-zero elements of the matrix in row order and call handler (lambda closure).\n\t */\n\ttemplate<typename L> void forNonZeroElementsInRowOrder(L handle) const;\n\n\t/**\n\t * Iterate in parallel over all rows and call handler (lambda closure) on non-zero elements of the matrix.\n\t */\n\ttemplate<typename L> void parallelForNonZeroElementsInRowOrder(L handle) const;\n\n\t/**\n\t * Iterate in parallel over all rows and call handler (lambda closure) on non-zero elements of the matrix.\n\t */\n\ttemplate<typename L> void parallelForNonZeroElementsInRowOrder(L handle);\n};\n\n\n} /* namespace NetworKit */\n\ntemplate<typename L>\ninline void NetworKit::Matrix::forNonZeroElementsInRow(index row, L handle) const {\n\tgraph.forEdgesOf(row, [&](index j, edgeweight weight){\n\t\thandle(row, j, weight);\n\t});\n}\n\ntemplate<typename L>\ninline void NetworKit::Matrix::forNonZeroElementsInRowOrder(L handle) const {\n\tfor (index i = 0; i < nRows; ++i) {\n\t\tgraph.forEdgesOf(i, [&](index j, edgeweight weight){\n\t\t\thandle(i, j, weight);\n\t\t});\n\t}\n}\n\ntemplate<typename L>\ninline void NetworKit::Matrix::parallelForNonZeroElementsInRowOrder(L handle) const {\n#pragma omp parallel for\n\tfor (index i = 0; i < nRows; ++i) {\n\t\tgraph.forEdgesOf(i, [&](index j, edgeweight weight){\n\t\t\thandle(i, j, weight);\n\t\t});\n\t}\n}\n\ntemplate<typename L>\ninline void NetworKit::Matrix::parallelForNonZeroElementsInRowOrder(L handle) {\n#pragma omp parallel for\n\tfor (index i 
= 0; i < nRows; ++i) {\n\t\tgraph.forEdgesOf(i, [&](index j, edgeweight weight){\n\t\t\thandle(i, j, weight);\n\t\t});\n\t}\n}\n\n\n#endif /* MATRIX_H_ */\n" }, { "alpha_fraction": 0.6819338202476501, "alphanum_fraction": 0.6908397078514099, "avg_line_length": 18.170732498168945, "blob_id": "6f06e447a5f01c212f7daed62d990bdb10c438be", "content_id": "2248a6d5e5db520d2a12c0505ad91a220616aff3", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 786, "license_type": "permissive", "max_line_length": 87, "num_lines": 41, "path": "/networkit/cpp/viz/Layouter.h", "repo_name": "networkitproject/networkit-mirror", "src_encoding": "UTF-8", "text": "/*\n * Layouter.h\n *\n * Created on: Apr 11, 2013\n * Author: Henning\n */\n\n#ifndef LAYOUTER_H_\n#define LAYOUTER_H_\n\n#include \"../graph/Graph.h\"\n\nnamespace NetworKit {\n\n/**\n * DEPRECATED: use base class LayoutAlgorithm instead\n * @ingroup viz\n */\nclass Layouter {\nprotected:\n\tPoint<float> bottomLeft;\n\tPoint<float> topRight;\n\tstd::vector<Point<float> > layout;\n\tbool initNecessary;\n\npublic:\n\t/**\n\t * DO NOT use to construct objects. Nullary constructor needed for Python shell.\n\t */\n\tLayouter() {}\n\tLayouter(Point<float> bottomLeft, Point<float> topRight, bool useGivenLayout = false);\n\tvirtual ~Layouter();\n\n\tvirtual void draw(Graph& g) = 0;\n\n\tvirtual void initialize(Graph& g);\n\tvirtual void randomInitCoordinates(Graph& g);\n};\n\n} /* namespace NetworKit */\n#endif /* LAYOUTER_H_ */\n" }, { "alpha_fraction": 0.7016260027885437, "alphanum_fraction": 0.7065040469169617, "avg_line_length": 17.923076629638672, "blob_id": "d0dbf32748aad686430cc4aed070b56cbb03daca", "content_id": "0742a0d9f9b7648075a17d93e961c58b2e0b4882", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1230, "license_type": "permissive", "max_line_length": 74, "num_lines": 65, "path": "/networkit/cpp/components/ParallelConnectedComponents.h", "repo_name": "networkitproject/networkit-mirror", "src_encoding": "UTF-8", "text": "/*\n * ParallelConnectedComponents.h\n *\n * Created on: Dec 16, 2013\n * Author: cls\n */\n\n#ifndef PARALLELCONNECTEDCOMPONENTS_H_\n#define PARALLELCONNECTEDCOMPONENTS_H_\n\n#include \"../graph/Graph.h\"\n#include \"../graph/BFS.h\"\n#include \"../structures/Partition.h\"\n#include \"../base/Algorithm.h\"\n\nnamespace NetworKit {\n\n/**\n * @ingroup components\n * Determines the connected components of an undirected graph.\n */\nclass ParallelConnectedComponents : public Algorithm {\npublic:\n\n\tParallelConnectedComponents(const Graph& G, bool coarsening = true);\n\n\t/**\n\t * This method determines the connected components for the graph g.\n\t */\n\tvoid runSequential();\n\n\t/**\n\t * This method determines the connected components for the graph g.\n\t */\n\tvoid run();\n\n\t/**\n\t * This method returns the number of connected components.\n\t */\n\tcount numberOfComponents();\n\n\t/**\n\t * This method returns the component in which node u is situated.\n\t *\n\t * @param[in]\tu\tthe node whose component is asked for\n\t */\n\tcount componentOfNode(node u);\n\n\n\t/**\n\t * Return a Partition that represents the components\n\t */\n\tPartition getPartition();\n\n\nprivate:\n\tconst Graph& G;\n\tPartition component;\n\tbool coarsening;\n};\n\n}\n\n\n#endif /* PARALLELCONNECTEDCOMPONENTS_H_ */\n" }, { "alpha_fraction": 0.6666666865348816, "alphanum_fraction": 0.680341899394989, "avg_line_length": 
17.870967864990234, "blob_id": "1d137341375beb46d6b45d7c058264c50bc6c692", "content_id": "2089b67c35155ab3574bd75084ac47dbc7335b84", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 585, "license_type": "permissive", "max_line_length": 76, "num_lines": 31, "path": "/networkit/cpp/numerics/LAMG/Level/LevelFinest.cpp", "repo_name": "networkitproject/networkit-mirror", "src_encoding": "UTF-8", "text": "/*\n * LevelFinest.cpp\n *\n * Created on: 10.01.2015\n * Author: Michael\n */\n\n#include \"LevelFinest.h\"\n\nnamespace NetworKit {\n\nLevelFinest::LevelFinest() : Level(FINEST) {\n}\n\nLevelFinest::LevelFinest(const CSRMatrix &A) : Level(LevelType::FINEST, A) {\n\t// set number of testVectors to TV_NUM??\n}\n\nvoid LevelFinest::coarseType(const Vector &xf, Vector &xc) const {\n\t// do nothing!\n}\n\nvoid LevelFinest::restrict(const Vector &bf, Vector &bc) const {\n\t// do nothing!\n}\n\nvoid LevelFinest::interpolate(const Vector &xc, Vector &xf) const {\n\t// do nothing!\n}\n\n} /* namespace NetworKit */\n" }, { "alpha_fraction": 0.5990621447563171, "alphanum_fraction": 0.6170378923416138, "avg_line_length": 25.65625, "blob_id": "5cf083977d48e75a02788028c0fe8345523c7aa0", "content_id": "d016be7d801ac0033dc79af7526afe732e76fdd3", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 5118, "license_type": "permissive", "max_line_length": 143, "num_lines": 192, "path": "/networkit/cpp/spanning/test/SpanningTreeGTest.cpp", "repo_name": "networkitproject/networkit-mirror", "src_encoding": "UTF-8", "text": "/*\n * SpanningTreeGTest.cpp\n *\n * Created on: 20.06.2015\n * Author: Henning\n */\n\n#include \"SpanningTreeGTest.h\"\n#include \"../PseudoRandomSpanningTree.h\"\n#include \"../RandomSpanningTree.h\"\n#include \"../../graph/Graph.h\"\n#include \"../../graph/Sampling.h\"\n#include \"../../graph/BFS.h\"\n#include \"../../io/METISGraphReader.h\"\n#include <cmath>\n#include \"omp.h\"\n\nnamespace NetworKit {\n\nTEST_F(SpanningTreeGTest, testRandomSpanningTree) {\n\tMETISGraphReader reader;\n\tstd::vector<std::string> graphs = {\"karate\", \"jazz\", \"celegans_metabolic\"};\n\n\tfor (auto graphname: graphs) {\n\t\tstd::string filename = \"input/\" + graphname + \".graph\";\n\t\tGraph G = reader.read(filename);\n\t\tRandomSpanningTree rst(G);\n\t\trst.run();\n\t\tGraph T = rst.getTree();\n\n\t\tT.forNodes([&](node u) {\n\t\t\tEXPECT_GE(G.degree(u), 0);\n\t\t});\n\n\t\tnode r1 = Sampling::randomNode(G);\n\t\tnode r2 = Sampling::randomNode(G);\n\t\twhile (r1 == r2) {\n\t\t\tr2 = Sampling::randomNode(G);\n\t\t}\n\n\t\tBFS bfs(T, r1, false, false, r2);\n\t\tbfs.run();\n\t\tEXPECT_LE(bfs.distance(r2), G.numberOfNodes() - 1);\n\t}\n}\n\nTEST_F(SpanningTreeGTest, testRandomSpanningTree2) {\n\tMETISGraphReader reader;\n\tstd::vector<std::string> graphs = {\"karate\", \"jazz\", \"celegans_metabolic\"};\n\n\tfor (auto graphname: graphs) {\n\t\tstd::string filename = \"input/\" + graphname + \".graph\";\n\t\tGraph G = reader.read(filename);\n\t\tRandomSpanningTree rst(G);\n\t\trst.run2();\n\t\tGraph T = rst.getTree();\n\t\tcount nNodes = 0, nEdges = 0;\n\t\tT.forNodes([&](node u) {\n\t\t\tEXPECT_GE(T.degree(u), 1);\n\t\t\tnNodes ++;\n\t\t});\n\t\tT.forEdges([&](node u, node v){\n\t\t\tnEdges ++;\n\t\t});\n\t\tEXPECT_EQ(nNodes, nEdges + 1);\n\t\tnode r1 = Sampling::randomNode(G);\n\t\tnode r2 = Sampling::randomNode(G);\n\t\twhile (r1 == r2) {\n\t\t\tr2 = 
Sampling::randomNode(G);\n\t\t}\n\n\t\tBFS bfs(T, r1, false, false, r2);\n\t\tbfs.run();\n\t\tEXPECT_LE(bfs.distance(r2), G.numberOfNodes() - 1);\n\t}\n}\n\nTEST_F(SpanningTreeGTest, testPseudoRandomSpanningTree) {\n    // TODO: see above\n}\n\nTEST_F(SpanningTreeGTest, benchRandomSpanningTree) {\n\tMETISGraphReader reader;\n\tstd::vector<std::string> graphs = {\"karate\", \"jazz\", \"celegans_metabolic\", \"airfoil1\",\"power\", \"PGPgiantcompo\"};\n\tcount reps = 10;\n\n\tfor (auto graphname: graphs) {\n\t\tstd::string filename = \"input/\" + graphname + \".graph\";\n\t\tGraph G = reader.read(filename);\n\n\t\tGraph Gwr(G.numberOfNodes(), true, false);\n\t\tG.forEdges([&](node u, node v) {\n\t\t\tGwr.addEdge(u, v, 0.0);\n\t\t});\n\t\tGraph Gwp = Gwr;\n\t\tGraph Gwr2 = Gwr;\n\n\t\t// random sampling\n\t\tdouble rstTime = 0.0;\n\t\tRandomSpanningTree rst(G);\n\t\tfor (index i = 0; i < reps; ++i) {\n\t\t\tdouble time = omp_get_wtime();\n\t\t\trst.run();\n\t\t\trstTime += omp_get_wtime() - time;\n\t\t\tGraph tree = rst.getTree();\n\t\t\ttree.forEdges([&](node u, node v) {\n\t\t\t\tGwr.setWeight(u, v, 1 + Gwr.weight(u, v));\n\t\t\t});\n\t\t}\n\n\t\tdouble bfsTime = 0;\n\t\tfor (index i = 0; i < reps; ++i) {\n\t\t\tnode v = G.randomNode();\n\t\t\tBFS bfs(G, v);\n\t\t\tdouble time = omp_get_wtime();\n\t\t\tbfs.run();\n\t\t\tbfsTime += omp_get_wtime() - time;\n\t\t}\n\n\t\t// random sampling 2\n\t\tINFO(\"Starting random sampling 2\");\n\t\tdouble rstTime2 = 0.0;\n\t\tRandomSpanningTree rst2(G);\n\t\tfor (index i = 0; i < reps; ++i) {\n\t\t\tdouble time = omp_get_wtime();\n\t\t\trst2.run2();\n\t\t\trstTime2 += omp_get_wtime() - time;\n\t\t\tGraph tree2 = rst2.getTree();\n\t\t\ttree2.forEdges([&](node u, node v) {\n\t\t\t\tGwr2.setWeight(u, v, 1 + Gwr2.weight(u, v));\n\t\t\t});\n\t\t}\n\t\tINFO(\"Done\");\n\n\t\t// sampling of pseudo random trees\n\t\tdouble prstTime = 0.0;\n\t\tPseudoRandomSpanningTree prst(G);\n\t\tfor (index i = 0; i < reps; ++i) {\n\t\t\tdouble time = omp_get_wtime();\n\t\t\tprst.run();\n\t\t\tprstTime += omp_get_wtime() - time;\n\t\t\tGraph tree = prst.getTree();\n\t\t\ttree.forEdges([&](node u, node v) {\n\t\t\t\tGwp.setWeight(u, v, 1 + Gwp.weight(u, v));\n\t\t\t});\n\t\t}\n\n\t\t// sampling results\n\t\tdouble maxDev = 0.0;\n\t\tdouble l1Dev = 0.0;\n\t\tdouble l2Dev = 0.0;\n\n\t\tdouble maxRatio = 0.0;\n\t\tdouble minRatio = 1e40;\n\t\tdouble gmeanRatio = 1.0;\n\n\t\tG.forEdges([&](node u, node v) {\n\t\t\tdouble dev = (Gwr.weight(u, v) - Gwp.weight(u, v)) / reps;\n\t\t\tl1Dev += fabs(dev);\n\t\t\tl2Dev += dev * dev;\n\t\t\tif (dev > maxDev) {\n\t\t\t\tmaxDev = dev;\n\t\t\t}\n\n\t\t\tif (std::min(Gwr.weight(u,v), Gwp.weight(u, v)) > 0.0) {\n\t\t\t\tdouble ratio = ((double) Gwr.weight(u, v) / (double) Gwp.weight(u, v));\n\t\t\t\tgmeanRatio *= ratio;\n\t\t\t\tif (ratio > maxRatio) {\n\t\t\t\t\tmaxRatio = ratio;\n\t\t\t\t}\n\t\t\t\tif (ratio < minRatio) {\n\t\t\t\t\tminRatio = ratio;\n\t\t\t\t}\n\t\t\t}\n\t\t});\n\t\tl2Dev = sqrt(l2Dev);\n\t\tgmeanRatio = sqrt(gmeanRatio);\n\t\tINFO(graphname, \", max: \", maxDev, \", l1: \", l1Dev, \", l2: \", l2Dev);\n\t\tINFO(graphname, \" ==> time ratio: \", (prstTime / rstTime), \", maxRatio: \", maxRatio, \", minRatio: \", minRatio, \", gmeanRatio: \", gmeanRatio);\n\t\tINFO(graphname, \" ==> time ratio2: \", (rstTime2 / rstTime));\n\t\tINFO(graphname, \" ==> time ratio bfs: \", (bfsTime / rstTime));\n\t\t// TODO: possibly better as an external program\n\t\t// TODO: random shuffle according to node degree, i.e. higher-degree nodes\n\t\t// end up near the front with higher probability.\n\t\t// Workaround: edge array with the multiplicity of each edge given by the sum of the incident node degrees,\n\t\t// then draw from it at random, only considering what has not been drawn yet\n\t\t// slower, but only as a proof of concept!\n\t}\n}\n\n\n} /* namespace NetworKit */\n" }, { "alpha_fraction": 0.6373372077941895, "alphanum_fraction": 0.6651229858398438, "avg_line_length": 24.218978881835938, "blob_id": "db2d458c4244f12bb70d1af538676374b8edfa5b", "content_id": "52881338c3d379aa0e93615da53c19ebefaf9193", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 3455, "license_type": "permissive", "max_line_length": 71, "num_lines": 137, "path": "/networkit/cpp/centrality/test/DynBetweennessGTest.cpp", "repo_name": "networkitproject/networkit-mirror", "src_encoding": "UTF-8", "text": "/*\n * DynBetweennessGTest.cpp\n *\n * Created on: 05.08.2014\n * Author: ebergamini, cls\n */\n\n#include \"DynBetweennessGTest.h\"\n#include \"../Betweenness.h\"\n#include \"../DynApproxBetweenness.h\"\n#include \"../ApproxBetweenness.h\"\n#include \"../ApproxBetweenness2.h\"\n#include \"../../io/METISGraphReader.h\"\n#include \"../../auxiliary/Log.h\"\n#include \"../../auxiliary/NumericTools.h\"\n#include \"../../graph/Sampling.h\"\n#include \"../../generators/DorogovtsevMendesGenerator.h\"\n#include \"../../generators/ErdosRenyiGenerator.h\"\n\nnamespace NetworKit {\n\n\n\nTEST_F(DynBetweennessGTest, testDynApproxBetweennessSmallGraph) {\n/* Graph:\n0 3 6\n\t\\ / \\ /\n\t2 5\n\t/ \\ / \\\n1 4 7\n*/\n\tint n = 8;\n\tGraph G(n);\n\n\tG.addEdge(0, 2);\n\tG.addEdge(1, 2);\n\tG.addEdge(2, 3);\n\tG.addEdge(2, 4);\n\tG.addEdge(3, 5);\n\tG.addEdge(4, 5);\n\tG.addEdge(5, 6);\n\tG.addEdge(5, 7);\n\n\tdouble epsilon = 0.01; // error\n\tdouble delta = 0.1; // confidence\n\tDynApproxBetweenness dynbc(G, epsilon, delta);\n\tBetweenness bc(G);\n\tdynbc.run();\n\tbc.run();\n\tstd::vector<double> dynbc_scores = dynbc.scores();\n\tstd::vector<double> bc_scores = bc.scores();\n\tfor(int i=0; i<n; i++) {\n\t\tDEBUG(\"Difference \", dynbc_scores[i]-bc_scores[i]/double(n*(n-1)));\n\t}\n\tstd::vector<GraphEvent> batch;\n\tbatch.push_back(GraphEvent(GraphEvent::EDGE_ADDITION, 0, 6, 1.0));\n\tG.addEdge(batch[0].u, batch[0].v);\n\tbc.run();\n\tdynbc.update(batch);\n\tdynbc_scores = dynbc.scores();\n\tbc_scores = bc.scores();\n\tfor(int i=0; i<n; i++) {\n\t\tDEBUG(\"Difference \", dynbc_scores[i]-bc_scores[i]/double(n*(n-1)));\n\t}\n\n}\n\n\nTEST_F(DynBetweennessGTest, testDynVsStatic) {\n\tMETISGraphReader reader;\n\tGraph G = reader.read(\"input/PGPgiantcompo.graph\");\n\tcount n = G.upperNodeIdBound();\n\n\tdouble epsilon = 0.1; // error\n\tdouble delta = 0.1; // confidence\n\tINFO(\"Initializing DynApproxBetweenness\");\n\tDynApproxBetweenness dynbc(G, epsilon, delta, false);\n\tINFO(\"Initializing ApproxBetweenness\");\n\tApproxBetweenness bc(G, epsilon, delta);\n\tINFO(\"Running DynApproxBetweenness\");\n\tdynbc.run();\n\tINFO(\"Running ApproxBetweenness\");\n\tbc.run();\n\tstd::vector<double> dynbc_scores = dynbc.scores();\n\tstd::vector<double> bc_scores = bc.scores();\n\tdouble err1=0;\n\tfor(count i=0; i<n; i++) {\n\t\tdouble x = dynbc_scores[i]-bc_scores[i];\n\t\tif (x > err1)\n\t\t\terr1 = x;\n\t}\n\tDEBUG(\"Before the edge insertion: \");\n\tstd::vector<GraphEvent> batch;\n\tcount nInsertions = 10, i = 0;\n\twhile (i < nInsertions) 
{\n\t\tnode v1 = Sampling::randomNode(G);\n\t\tnode v2 = Sampling::randomNode(G);\n\t\tif (v1 != v2 && !G.hasEdge(v1, v2)) {\n\t\t\tG.addEdge(v1, v2);\n\t\t\tbatch.push_back(GraphEvent(GraphEvent::EDGE_ADDITION, v1, v2, 1.0));\n\t\t\ti++;\n\t\t}\n\t}\n\tINFO(\"Running ApproxBetweenness (again)\");\n\tbc.run();\n\tINFO(\"Updating DynApproxBetweenness\");\n\tdynbc.update(batch);\n\tINFO(\"Calling DynApproxBetweenness Scores\");\n\tdynbc_scores = dynbc.scores();\n\tINFO(\"Calling ApproxBetweenness Scores\");\n\tbc_scores = bc.scores();\n\terr1 = 0;\n\tfor(count i=0; i<n; i++) {\n\t\tdouble x = dynbc_scores[i]-bc_scores[i];\n\t\tif (x > err1)\n\t\t\terr1 = x;\n\t}\n\tDEBUG(\"After the edge insertion: \");\n\n}\n\n\nTEST_F(DynBetweennessGTest, testApproxBetweenness) {\n\tMETISGraphReader reader;\n\tDorogovtsevMendesGenerator generator(1000);\n\tGraph G1 = generator.generate();\n\tGraph G(G1, true, false);\n\tApproxBetweenness bc(G, 0.1, 0.1);\n\tbc.run();\n\tDEBUG(\"Number of samples: \", bc.numberOfSamples());\n\tApproxBetweenness bc1(G1, 0.1, 0.1);\n\tbc1.run();\n\tDEBUG(\"Number of samples: \", bc1.numberOfSamples());\n}\n\n\n} /* namespace NetworKit */\n" }, { "alpha_fraction": 0.6939111351966858, "alphanum_fraction": 0.7043335437774658, "avg_line_length": 38.68965530395508, "blob_id": "814ba69d4eaf811220f1b61888c9175ed147517f", "content_id": "30f2083a0afc7e7bb2d17c613cab431ddda59e54", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4603, "license_type": "permissive", "max_line_length": 155, "num_lines": 116, "path": "/mbe.py", "repo_name": "networkitproject/networkit-mirror", "src_encoding": "UTF-8", "text": "import os\nimport subprocess\nimport shlex\nimport multiprocessing\nimport fnmatch\nfrom concurrent.futures import ProcessPoolExecutor\n\nclass MinimalBuildEnvironment:\n\t\"\"\" A very minimalistic Build Environment that aims to replace SCons for the\n\t\tNetworKit installation via PIP/easy_install on systems where SCons is not available. \n\t\tCurrently, it is only possible to compile NetworKit as a library with optimization flags, \n\t\twhich is the desired behavior for installation.\n\t\tIt remains unclear if this build environment will be extended to fully replace SCons.\n\t\"\"\"\n\n\tdef __init__(self, default_compile_flags,debug_compile_flags,release_compile_flags,linker_flags,optimize,compiler, src_dir):\n\t\t\"\"\" Constructor. \"\"\"\n\t\tself.__default_compile_flags\t= default_compile_flags\n\t\tself.__debug_compile_flags\t\t= debug_compile_flags\n\t\tself.__release_compile_flags\t= release_compile_flags\n\t\tself.__linker_flags\t\t\t\t= linker_flags\n\t\tself.__optimize\t\t\t\t\t= optimize\n\t\tself.__build_dir\t\t\t\t= \".build{0}\".format(optimize)\n\t\tself.__compiler\t\t\t\t\t= compiler\n\t\tself.__src_dir\t\t\t\t\t= src_dir\n\t\tself.__object_files\t\t\t\t= []\n\n\tdef __getSourceFiles(self, target):\n\t\t\"\"\" This function gathers and returns all .cpp files for the given target. 
\"\"\"\n\t\tsource = []\n\n\t\t# walk source directory and find ONLY .cpp files\n\t\tfor (dirpath, dirnames, filenames) in os.walk(self.__src_dir):\n\t\t\tfor name in fnmatch.filter(filenames, \"*.cpp\"):\n\t\t\t\tsource.append(os.path.join(dirpath, name))\n\n\t\t# exclude files depending on target, executables will be addes later\n\t\txpatterns = [\"*-X.cpp\"]\n\t\texcluded = []\n\n\t\t# only the target \"Test\" requires Benchmark and GTest files\n\t\tif (target not in [\"Tests\"]):\n\t\t\t# exclude files matching following patterns\n\t\t\txpatterns += [\"*GTest.cpp\",\"*Benchmark.cpp\"]\n\n\t\tfor pattern in xpatterns:\n\t\t\tfor name in fnmatch.filter(source, pattern):\n\t\t\t\texcluded.append(name)\n\n\t\t#print(\"excluded source files: {0}\".format(excluded))\n\t\tsource = [name for name in source if name not in excluded]\n\n\t\t# add executable\n\t\tif target == \"Tests\":\n\t\t\tsource.append(os.path.join(srcDir, \"Unittests-X.cpp\"))\n\t\telif target in [\"Core\",\"Lib\"]:\n\t\t\tpass # no executable\n\t\telse:\n\t\t\tprint(\"Unknown target: {0}\".format(target))\n\t\t\texit(1)\n\t\treturn source\n\n\tdef compile_file(self, file):\n\t\t\"\"\" Compiles a given file and returns the name of the output file. \"\"\"\n\t\tofile = \"{0}.o\".format(file.split(\"/\")[-1][:-4])\n\t\tcomp_cmd = [self.__compiler] + self.__default_compile_flags + self.__release_compile_flags + [\"-o{0}\".format(os.path.join(self.__build_dir,ofile)), file]\n\t\tprint(\" \".join(comp_cmd))\n\t\treturn (subprocess.call(comp_cmd),ofile)\n\n\tdef compile(self, target):\n\t\t\"\"\" Compiles the all source files and runs the appropriate commands for the given target. \"\"\"\n\t\t# make build dir if not existing yet\n\t\tif not os.path.exists(self.__build_dir):\n\t\t\tos.mkdir(self.__build_dir)\n\n\t\t# get source files\n\t\tcppfiles = self.__getSourceFiles(target)\n\t\t#print(cppfiles)\n\t\t# compile each source file on its own. 
halt if an error occurs\n\t\twith ProcessPoolExecutor(max_workers=multiprocessing.cpu_count()) as executor:\n\t\t\tfor (returncode, ofile) in executor.map(self.compile_file,cppfiles):\n\t\t\t\tself.__object_files.append(ofile)\n\t\t\t\tif not returncode == 0:\n\t\t\t\t\tprint(\"compilation of a file went wrong, exiting...\")\n\t\t\t\t\texit(1)\n\n\t\t# pull together each object file in one string for linking/archiving\n\t\t#linker_sources_str = \"\"\n\t\tlinker_sources = []\n\t\tfor o in self.__object_files:\n\t\t\t#linker_sources_str += (os.path.join(self.__build_dir,o) + \" \")\n\t\t\tlinker_sources.append(os.path.join(self.__build_dir,o))\n\n\n\t\t# link/archive files\n\t\tlink_cmd = [\"ar\",\"rc\", \"libNetworKit-Core-{0}.a\".format(self.__optimize)] + linker_sources\n\t\tprint(\" \".join(link_cmd))\n\t\tif not subprocess.call(link_cmd) == 0: #(link_proc.returncode != 0):\n\t\t\tprint(\"error during linking/archiving, exiting...\")\n\t\t\texit(1)\n\t\t# index archive (if this build environment ever gets extended, this should be changed...)\n\t\tindex_cmd = [\"ranlib\",\"libNetworKit-Core-{0}.a\".format(self.__optimize)]\n\t\tprint(\" \".join(index_cmd))\n\t\tif not subprocess.call(index_cmd) == 0:\n\t\t\tprint(\"error during indexing, exiting...\")\n\t\t\texit(1)\n\nif __name__ == \"__main__\":\n\tDEFAULTCOMPILEFLAGS = [\"-c\", \"-std=c++11\", \"-Wall\", \"-fmessage-length=0\", \"-fPIC\", \"-fopenmp\"]\n\tDEBUGCOMPILEFLAGS = [\"-O0\", \"-g3\", \"-DLOG_LEVEL=LOG_LEVEL_TRACE\"]\n\tRELEASECOMPILEFLAGS = [\"-O3\", \"-DNDEBUG\", \"-DLOG_LEVEL=LOG_LEVEL_INFO\"]\n\tLINKERFLAGS = \"\"\n\n\t# test functionality by compiling NetworKit as a library.\n\tbuilder = MinimalBuildEnvironment(DEFAULTCOMPILEFLAGS,\"\",RELEASECOMPILEFLAGS,\"\",\"Opt\",\"g++\", \"networkit/cpp\")\n\tbuilder.compile(\"Core\")" }, { "alpha_fraction": 0.7272727489471436, "alphanum_fraction": 0.7290209531784058, "avg_line_length": 30.72222137451172, "blob_id": "36027fd27c34f7408901ab689403876d5bf858a3", "content_id": "4654701cb50dd4421488bdd1324deabc1d24f678", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 572, "license_type": "permissive", "max_line_length": 81, "num_lines": 18, "path": "/LibDemo.cpp", "repo_name": "networkitproject/networkit-mirror", "src_encoding": "UTF-8", "text": "#include <NetworKit/io/METISGraphReader.h>\n#include <NetworKit/auxiliary/Log.h>\n#include <NetworKit/community/PLM.h>\n#include <iostream>\n\n\nint main() {\n\tstd::cout << \"simple demonstration of NetworKit as a library\\n\";\n\tAux::Log::Settings::setLogLevel(Aux::Log::LogLevel::info);\n\tNetworKit::METISGraphReader reader;\n\tNetworKit::Graph jazz = reader.read(\"./input/jazz.graph\");\n\tNetworKit::PLM plm(jazz,true);\n\tplm.run();\n\tNetworKit::Partition communities = plm.getPartition();\n\tstd::cout << communities.numberOfSubsets() << \" communities have been found\\n\";\t\n\treturn 0;\n\n}\n\n" }, { "alpha_fraction": 0.6280276775360107, "alphanum_fraction": 0.6435986161231995, "avg_line_length": 16, "blob_id": "c9bdc049e5f65eaaeeff854d9e45069e8c82a551", "content_id": "8b54a87ec5526cd1198f9b34bfc03d20cdac8829", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 578, "license_type": "permissive", "max_line_length": 62, "num_lines": 34, "path": "/networkit/cpp/io/GraphReader.h", "repo_name": "networkitproject/networkit-mirror", "src_encoding": "UTF-8", "text": "/*\n * GraphReader.h\n *\n * Created on: 17.01.2013\n * 
Author: Christian Staudt ([email protected])\n */\n\n#ifndef GRAPHREADER_H_\n#define GRAPHREADER_H_\n\n#include \"../graph/Graph.h\"\n#include \"../auxiliary/StringTools.h\"\n\n\nnamespace NetworKit {\n\n/**\n * @ingroup io\n * Abstract base class for graph readers.\n */\nclass GraphReader {\npublic:\n\tvirtual ~GraphReader() = default;\n\n\t/**\n\t * Given the path of an input file, read the graph contained.\n\t *\n\t * @param[in]\tpath\tinput file path\n\t */\n\tvirtual Graph read(const std::string& path) = 0;\n};\n\n} /* namespace NetworKit */\n#endif /* GRAPHREADER_H_ */\n" }, { "alpha_fraction": 0.7191358208656311, "alphanum_fraction": 0.730246901512146, "avg_line_length": 32.402061462402344, "blob_id": "5753e77f9c207811ffca46dcaeb51a7445b1f926", "content_id": "7ae2498686748cc8a8fd8b1dff2b03f13e5e1531", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 3240, "license_type": "permissive", "max_line_length": 181, "num_lines": 97, "path": "/networkit/cpp/numerics/LAMG/Lamg.h", "repo_name": "networkitproject/networkit-mirror", "src_encoding": "UTF-8", "text": "/*\n * Lamg.h\n *\n * Created on: Oct 20, 2015\n * Author: Michael Wegner ([email protected])\n */\n\n#ifndef NETWORKIT_CPP_NUMERICS_LAMG_LAMG_H_\n#define NETWORKIT_CPP_NUMERICS_LAMG_LAMG_H_\n\n#include <vector>\n\n#include \"../LinearSolver.h\"\n#include \"MultiLevelSetup.h\"\n#include \"SolverLamg.h\"\n#include \"../GaussSeidelRelaxation.h\"\n\nnamespace NetworKit {\n\n/**\n * @ingroup numerics\n * Represents the interface to the Lean Algebraic Multigrid (LAMG) graph Laplacian linear solver\n * by Oren E. Livne and Achi Brandt.\n * @see Livne, Oren E., and Achi Brandt. \"Lean algebraic multigrid (LAMG): Fast graph Laplacian linear solver.\" SIAM Journal on Scientific Computing 34.4 (2012): B499-B522.\n */\nclass Lamg : public LinearSolver {\nprivate:\n\tbool validSetup;\n\tGaussSeidelRelaxation smoother;\n\tMultiLevelSetup lamgSetup;\n\tCSRMatrix laplacianMatrix;\n\tstd::vector<LevelHierarchy> compHierarchies;\n\tstd::vector<SolverLamg> compSolvers;\n\tstd::vector<LAMGSolverStatus> compStati;\n\n\tstd::vector<Vector> initialVectors;\n\tstd::vector<Vector> rhsVectors;\n\n\tcount numComponents;\n\tstd::vector<std::vector<index>> components;\n\tstd::vector<index> graph2Components;\n\n\tvoid initializeForOneComponent();\n\npublic:\n\t/**\n\t * Construct a solver with the given @a tolerance. The relative residual ||Ax-b||/||b|| will be less than or equal to\n\t * @a tolerance after the solver finished.\n\t * @param tolerance\n\t */\n\tLamg(const double tolerance = 1e-6);\n\t/** Default destructor */\n\t~Lamg() = default;\n\n\t/**\n\t * Compute the multigrid hierarchy for the given Laplacian matrix @a laplacianMatrix.\n\t * @param laplacianMatrix\n\t * @note This method also works for disconnected graphs. If you know that the graph is connected,\n\t * it is faster to use @ref setupConnected instead.\n\t */\n\tvoid setup(const CSRMatrix &laplacianMatrix);\n\n\t/**\n\t * Compute the multigrid hierarchy for the given Laplacian matrix @a laplacianMatrix.\n\t * @param laplacianMatrix\n\t * @note The graph has to be connected for this method to work. 
Otherwise the output is undefined.\n\t */\n\tvoid setupConnected(const CSRMatrix &laplacianMatrix);\n\n\t/**\n\t * Computes the @a result for the matrix currently set up and the right-hand side @a rhs.\n\t * The maximum time spent can be specified by @a maxConvergenceTime and the maximum number of iterations can be set\n\t * by @a maxIterations.\n\t * @param rhs\n\t * @param result\n\t * @param maxConvergenceTime\n\t * @param maxIterations\n\t * @return A @ref SolverStatus object which provides some statistics like the final absolute residual.\n\t */\n\tSolverStatus solve(const Vector &rhs, Vector &result, count maxConvergenceTime = 5 * 60 * 1000, count maxIterations = std::numeric_limits<count>::max());\n\n\t/**\n\t * Compute the @a results for the matrix currently set up and the right-hand sides @a rhs.\n\t * The maximum time spent for each system can be specified by @a maxConvergenceTime and the maximum number of iterations can be set\n\t * by @a maxIterations.\n\t * @param rhs\n\t * @param results\n\t * @param maxConvergenceTime\n\t * @param maxIterations\n\t */\n\tvoid parallelSolve(const std::vector<Vector> &rhs, std::vector<Vector> &results, count maxConvergenceTime = 5 * 60 * 1000, count maxIterations = std::numeric_limits<count>::max());\n\n};\n\n} /* namespace NetworKit */\n\n#endif /* NETWORKIT_CPP_NUMERICS_LAMG_LAMG_H_ */\n" }, { "alpha_fraction": 0.6416772603988647, "alphanum_fraction": 0.6505717635154724, "avg_line_length": 20.561643600463867, "blob_id": "082691f0dee355c266e64db01fda5f4c7648060f", "content_id": "ad875cf44138928961c741ef33b4e70c73e7ec91", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1574, "license_type": "permissive", "max_line_length": 108, "num_lines": 73, "path": "/networkit/cpp/centrality/Closeness.cpp", "repo_name": "networkitproject/networkit-mirror", "src_encoding": "UTF-8", "text": "/*\n * Closeness.cpp\n *\n * Created on: 03.10.2014\n * Author: nemes\n */\n\n#include <stack>\n#include <queue>\n#include <memory>\n\n#include \"Closeness.h\"\n#include \"../auxiliary/PrioQueue.h\"\n#include \"../auxiliary/Log.h\"\n#include \"../graph/SSSP.h\"\n#include \"../graph/Dijkstra.h\"\n#include \"../graph/BFS.h\"\n#include \"../components/ConnectedComponents.h\"\n\n\nnamespace NetworKit {\n\nCloseness::Closeness(const Graph& G, bool normalized, bool checkConnectedness) : Centrality(G, normalized) {\n\t// TODO: extend closeness definition to make check for connectedness unnecessary\n\tif (checkConnectedness) {\n\t\tConnectedComponents compo(G);\n\t\tcompo.run();\n\t\tif (compo.numberOfComponents() != 1) {\n\t\t\tthrow std::runtime_error(\"Closeness is undefined on disconnected graphs\");\n\t\t}\n\t}\n}\n\nvoid Closeness::run() {\n\tcount z = G.upperNodeIdBound();\n\tscoreData.clear();\n\tscoreData.resize(z);\n\tedgeweight infDist = std::numeric_limits<edgeweight>::max();\n\n\tG.parallelForNodes([&](node s) {\n\t\tstd::unique_ptr<SSSP> sssp;\n\t\tif (G.isWeighted()) {\n\t\t\tsssp.reset(new Dijkstra(G, s, true, true));\n\t\t} else {\n\t\t\tsssp.reset(new BFS(G, s, true, true));\n\t\t}\n\t\tsssp->run();\n\n\t\tstd::vector<edgeweight> distances = sssp->getDistances();\n\n\t\tdouble sum = 0;\n\t\tfor (auto dist : distances) {\n\t\t\tif (dist != infDist ) {\n\t\t\t\tsum += dist;\n\t\t\t}\n\t\t}\n\t\tscoreData[s] = 1 / sum;\n\n\t});\n\tif (normalized) {\n\t\tG.forNodes([&](node u){\n\t\t\tscoreData[u] = scoreData[u] * (G.numberOfNodes() - 1);\n\t\t});\n\t}\n\n\thasRun = true;\n}\n\ndouble Closeness::maximum() 
{\n\treturn (double) 1 / (G.numberOfNodes() - 1);\n}\n\n} /* namespace NetworKit */\n" }, { "alpha_fraction": 0.5129151344299316, "alphanum_fraction": 0.5903005003929138, "avg_line_length": 25.907800674438477, "blob_id": "2d4f5639caebac0ce9b20220b16762ff897ab2a7", "content_id": "84075f2e77fd49571e444623858bb9e75748f6b4", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 18970, "license_type": "permissive", "max_line_length": 178, "num_lines": 705, "path": "/networkit/cpp/algebraic/test/DenseMatrixGTest.cpp", "repo_name": "networkitproject/networkit-mirror", "src_encoding": "UTF-8", "text": "/*\n * DenseMatrixGTest.cpp\n *\n * Created on: Nov 25, 2015\n * Author: Michael Wegner ([email protected])\n */\n\n#include \"DenseMatrixGTest.h\"\n\nnamespace NetworKit {\n\nDenseMatrixGTest::DenseMatrixGTest() {\n}\n\nDenseMatrixGTest::~DenseMatrixGTest() {\n}\n\nTEST(DenseMatrixGTest, testMatrixDimension) {\n\tDenseMatrix mat(10, 10, std::vector<double>(100));\n\n\tASSERT_EQ(10u, mat.numberOfRows());\n\tASSERT_EQ(10u, mat.numberOfColumns());\n\n\tmat = DenseMatrix(5, 10, std::vector<double>(50));\n\tASSERT_EQ(5u, mat.numberOfRows());\n\tASSERT_EQ(10u, mat.numberOfColumns());\n\n\tmat = DenseMatrix(10, 5, std::vector<double>(50));\n\tASSERT_EQ(10u, mat.numberOfRows());\n\tASSERT_EQ(5u, mat.numberOfColumns());\n}\n\n\nTEST(DenseMatrixGTest, testRowAndColumnAccess) {\n\tstd::vector<double> values(100*100, 0.0);\n\n\tfor (index i = 0; i < 100; ++i) {\n\t\tvalues[3 * 100 + i] = i;\n\t}\n\n\tvalues[10*100 + 10] = 42.123;\n\n\tDenseMatrix mat(100, 100, values);\n\n\tVector v = mat.row(3);\n\tASSERT_EQ(mat.numberOfColumns(), v.getDimension());\n\n\tfor (index i = 0; i < 100; ++i) {\n\t\tEXPECT_EQ(i, v[i]);\n\t}\n\n\tv = mat.row(10);\n\tASSERT_EQ(v.getDimension(), mat.numberOfColumns());\n\tASSERT_TRUE(v.isTransposed());\n\tEXPECT_EQ(42.123, v[10]);\n\n\tv = mat.column(10);\n\tASSERT_EQ(mat.numberOfRows(), v.getDimension());\n\tASSERT_FALSE(v.isTransposed());\n\n\tEXPECT_EQ(10.0, v[3]);\n\tEXPECT_EQ(42.123, v[10]);\n\n\tmat.setValue(10, 10, 42);\n\tEXPECT_EQ(42, mat(10,10));\n\n\n\t// rectangular matrix\n\t// n x m (n < m)\n\tvalues = std::vector<double>(5*10, 0.0);\n\n\tvalues[4 * 10 + 9] = 11;\n\tvalues[0] = 42;\n\n\tmat = DenseMatrix(5, 10, values);\n\tv = mat.row(0);\n\tASSERT_EQ(v.getDimension(), 10u);\n\tfor (index i = 0; i < v.getDimension(); ++i) {\n\t\tif (i == 0) {\n\t\t\tEXPECT_EQ(42, v[i]);\n\t\t} else {\n\t\t\tEXPECT_EQ(0, v[i]);\n\t\t}\n\t}\n\n\tv = mat.column(9);\n\tASSERT_EQ(v.getDimension(), 5u);\n\tfor (index i = 0; i < v.getDimension(); ++i) {\n\t\tif (i == v.getDimension() - 1) {\n\t\t\tEXPECT_EQ(11, v[i]);\n\t\t} else {\n\t\t\tEXPECT_EQ(0, v[i]);\n\t\t}\n\t}\n\n\t// rectangular matrix\n\t// n x m (n > m)\n\n\tvalues = std::vector<double>(10*5, 0.0);\n\n\tvalues[9 * 5 + 4] = 11;\n\tvalues[0] = 42;\n\n\tmat = DenseMatrix(10, 5, values);\n\tv = mat.row(0);\n\tASSERT_EQ(v.getDimension(), 5u);\n\tfor (index i = 0; i < v.getDimension(); ++i) {\n\t\tif (i == 0) {\n\t\t\tEXPECT_EQ(42, v[i]);\n\t\t} else {\n\t\t\tEXPECT_EQ(0, v[i]);\n\t\t}\n\t}\n\n\tv = mat.column(4);\n\tASSERT_EQ(v.getDimension(), 10u);\n\tfor (index i = 0; i < v.getDimension(); ++i) {\n\t\tif (i == v.getDimension() - 1) {\n\t\t\tEXPECT_EQ(11, v[i]);\n\t\t} else {\n\t\t\tEXPECT_EQ(0, v[i]);\n\t\t}\n\t}\n}\n\nTEST(DenseMatrixGTest, testMatrixAddition) {\n\tstd::vector<double> values1(100*100, 0.0);\n\tstd::vector<double> values2(100*100, 0.0);\n\n\tfor (index i = 0; i < 
100; ++i) {\n\t\tvalues1[i * 100 + i] = 1;\n\t\tvalues2[i * 100 + i] = i;\n\t}\n\n\tvalues1[2 * 100 + 71] = 1.8;\n\tvalues2[42*100 + 43] = 3.14;\n\n\tDenseMatrix mat1(100, 100, values1);\n\tDenseMatrix mat2(100, 100, values2);\n\n\tDenseMatrix result = mat1 + mat2;\n\tASSERT_EQ(mat1.numberOfRows(), result.numberOfRows());\n\tASSERT_EQ(mat1.numberOfColumns(), result.numberOfColumns());\n\n\tEXPECT_EQ(0.0, result(10, 13));\n\n\n\tfor (index i = 0; i < result.numberOfRows(); ++i) {\n\t\tEXPECT_EQ((i + 1), result(i, i));\n\t}\n\tEXPECT_EQ(1.8, result(2, 71));\n\tEXPECT_EQ(3.14, result(42, 43));\n\n\tEXPECT_EQ(0.0, result(3, 14));\n\n\n//\t// rectangular matrix\n//\t// n x m (n < m)\n//\tpositions1 = {std::make_pair(0,0), std::make_pair(1,2)};\n//\tvalues1 = {1.0, 3.0};\n//\tmat1 = DenseMatrix(2, 5, positions1, values1);\n//\n//\tpositions2 = {std::make_pair(0,2), std::make_pair(1,2)};\n//\tvalues2 = {1.0, 1.0};\n//\tmat2 = DenseMatrix(2, 5, positions2, values2);\n//\n//\tmat1.sort();\n//\tmat2.sort();\n//\n//\tresult = mat1 + mat2;\n//\n//\tASSERT_EQ(2u, result.numberOfRows());\n//\tASSERT_EQ(5u, result.numberOfColumns());\n//\n//\tEXPECT_EQ(1, result(0,0));\n//\tEXPECT_EQ(1, result(0,2));\n//\tEXPECT_EQ(4, result(1,2));\n//\n//\tEXPECT_EQ(0, result(0,1));\n//\tEXPECT_EQ(0, result(1,4));\n//\n//\t// rectangular matrix\n//\t// n x m (n > m)\n//\tpositions1 = {std::make_pair(0,0), std::make_pair(2,1)};\n//\tvalues1 = {1.0, 3.0};\n//\tmat1 = DenseMatrix(5, 2, positions1, values1);\n//\n//\tpositions2 = {std::make_pair(2,0), std::make_pair(2,1)};\n//\tvalues2 = {1.0, 1.0};\n//\tmat2 = DenseMatrix(5, 2, positions2, values2);\n//\n//\tmat1.sort();\n//\tmat2.sort();\n//\n//\tresult = mat1 + mat2;\n//\n//\tASSERT_EQ(5u, result.numberOfRows());\n//\tASSERT_EQ(2u, result.numberOfColumns());\n//\n//\tEXPECT_EQ(1, result(0,0));\n//\tEXPECT_EQ(1, result(2,0));\n//\tEXPECT_EQ(4, result(2,1));\n//\n//\tEXPECT_EQ(0, result(0,1));\n//\tEXPECT_EQ(0, result(4,1));\n}\n\n//TEST(DenseMatrixGTest, testMatrixSubtraction) {\n//\tstd::vector<std::pair<index, index> > positions1;\n//\tstd::vector<std::pair<index, index> > positions2;\n//\tstd::vector<double> values1;\n//\tstd::vector<double> values2;\n//\n//\tfor (index i = 0; i < 100000; ++i) {\n//\t\tpositions1.push_back(std::make_pair(i, i));\n//\t\tpositions2.push_back(std::make_pair(i, i));\n//\t\tvalues1.push_back(1);\n//\t\tvalues2.push_back(i);\n//\t}\n//\n//\tpositions1.push_back(std::make_pair(2, 71));\n//\tvalues1.push_back(1.8);\n//\n//\tpositions2.push_back(std::make_pair(42, 43));\n//\tvalues2.push_back(3.14);\n//\n//\tDenseMatrix mat1(100000, 100000, positions1, values1);\n//\tDenseMatrix mat2(100000, 100000, positions2, values2);\n//\n//\tmat1.sort();\n//\tmat2.sort();\n//\n//\tDenseMatrix result = mat2 - mat1;\n//\tASSERT_EQ(mat1.numberOfRows(), result.numberOfRows());\n//\tASSERT_EQ(mat1.numberOfColumns(), result.numberOfColumns());\n//\n//\tEXPECT_EQ(0.0, result(10, 13));\n//\n//\tfor (index i = 0; i < result.numberOfRows(); ++i) {\n//\t\tEXPECT_EQ(((int) i - 1), result(i, i));\n//\t}\n//\tEXPECT_EQ(-1.8, result(2, 71));\n//\tEXPECT_EQ(3.14, result(42, 43));\n//\n//\tEXPECT_EQ(0.0, result(3, 14));\n//\n//\t// rectangular matrix\n//\t// n x m (n < m)\n//\tpositions1 = {std::make_pair(0,0), std::make_pair(1,2)};\n//\tvalues1 = {1.0, 3.0};\n//\tmat1 = DenseMatrix(2, 5, positions1, values1);\n//\n//\n//\tpositions2 = {std::make_pair(0,2), std::make_pair(1,2)};\n//\tvalues2 = {1.0, 1.0};\n//\tmat2 = DenseMatrix(2, 5, positions2, 
values2);\n//\n//\tmat1.sort();\n//\tmat2.sort();\n//\n//\tresult = mat1 - mat2;\n//\n//\tASSERT_EQ(2u, result.numberOfRows());\n//\tASSERT_EQ(5u, result.numberOfColumns());\n//\n//\tEXPECT_EQ(1, result(0,0));\n//\tEXPECT_EQ(-1, result(0,2));\n//\tEXPECT_EQ(2, result(1,2));\n//\n//\tEXPECT_EQ(0, result(0,1));\n//\tEXPECT_EQ(0, result(1,4));\n//\n//\t// rectangular matrix\n//\t// n x m (n > m)\n//\tpositions1 = {std::make_pair(0,0), std::make_pair(2,1)};\n//\tvalues1 = {1.0, 3.0};\n//\tmat1 = DenseMatrix(5, 2, positions1, values1);\n//\n//\tpositions2 = {std::make_pair(2,0), std::make_pair(2,1)};\n//\tvalues2 = {1.0, 1.0};\n//\tmat2 = DenseMatrix(5, 2, positions2, values2);\n//\n//\tmat1.sort();\n//\tmat2.sort();\n//\n//\tresult = mat1 - mat2;\n//\n//\tASSERT_EQ(5u, result.numberOfRows());\n//\tASSERT_EQ(2u, result.numberOfColumns());\n//\n//\tEXPECT_EQ(1, result(0,0));\n//\tEXPECT_EQ(-1, result(2,0));\n//\tEXPECT_EQ(2, result(2,1));\n//\n//\tEXPECT_EQ(0, result(0,1));\n//\tEXPECT_EQ(0, result(4,1));\n//}\n\n//TEST(DenseMatrixGTest, testScalarMultiplication) {\n//\tstd::vector<std::pair<index, index> > positions;\n//\tstd::vector<double> values;\n//\n//\tfor (index i = 0; i < 10000; ++i) {\n//\t\tpositions.push_back(std::make_pair(i, i));\n//\t\tvalues.push_back(i);\n//\t}\n//\n//\tpositions.push_back(std::make_pair(42, 43));\n//\tvalues.push_back(42.0);\n//\n//\tDenseMatrix mat(10000, 10000, positions, values);\n//\tmat *= 2;\n//\tASSERT_EQ(10000u, mat.numberOfRows());\n//\tASSERT_EQ(10000u, mat.numberOfColumns());\n//\n//\tfor (index i = 0; i < 10000; ++i) {\n//\t\tEXPECT_EQ(i*2, mat(i, i));\n//\t}\n//\tEXPECT_EQ(84.0, mat(42, 43));\n//\tEXPECT_EQ(0.0, mat(55, 199));\n//\n//\tmat *= 0.5;\n//\n//\tfor (index i = 0; i < 10000; ++i) {\n//\t\tEXPECT_EQ(i, mat(i, i));\n//\t}\n//\tEXPECT_EQ(42.0, mat(42, 43));\n//\tEXPECT_EQ(0.0, mat(55, 199));\n//\n//\t// rectangular matrix\n//\tpositions = {std::make_pair(0,0), std::make_pair(1,2)};\n//\tvalues = {1.0, 3.0};\n//\tmat = DenseMatrix(2, 5, positions, values);\n//\n//\tmat *= 2;\n//\n//\tEXPECT_EQ(2, mat(0,0));\n//\tEXPECT_EQ(6, mat(1,2));\n//}\n\n//TEST(DenseMatrixGTest, testMatrixDivisionOperator) {\n//\tstd::vector<std::pair<index, index> > positions;\n//\tstd::vector<double> values;\n//\n//\tfor (index i = 0; i < 10000; ++i) {\n//\t\tpositions.push_back(std::make_pair(i, i));\n//\t\tvalues.push_back(i);\n//\t}\n//\n//\tpositions.push_back(std::make_pair(42, 43));\n//\tvalues.push_back(42.0);\n//\n//\tDenseMatrix mat(10000, 10000, positions, values);\n//\tmat /= (1.0 / 2.0);\n//\tASSERT_EQ(10000u, mat.numberOfRows());\n//\tASSERT_EQ(10000u, mat.numberOfColumns());\n//\n//\tfor (index i = 0; i < 10000; ++i) {\n//\t\tEXPECT_EQ(i*2, mat(i, i));\n//\t}\n//\tEXPECT_EQ(84.0, mat(42, 43));\n//\tEXPECT_EQ(0.0, mat(55, 199));\n//\n//\tmat /= 2;\n//\n//\tfor (index i = 0; i < 10000; ++i) {\n//\t\tEXPECT_EQ(i, mat(i, i));\n//\t}\n//\tEXPECT_EQ(42.0, mat(42, 43));\n//\tEXPECT_EQ(0.0, mat(55, 199));\n//\n//\t// rectangular matrix\n//\tpositions = {std::make_pair(0,0), std::make_pair(1,2)};\n//\tvalues = {1.0, 3.0};\n//\tmat = DenseMatrix(2, 5, positions, values);\n//\n//\tmat /= 2;\n//\n//\tEXPECT_EQ(0.5, mat(0,0));\n//\tEXPECT_EQ(1.5, mat(1,2));\n//}\n//\n//TEST(DenseMatrixGTest, testMatrixVectorProduct) {\n//\tstd::vector<std::pair<index, index> > mPositions;\n//\tstd::vector<double> mValues;\n//\n//\tfor (index i = 0; i < 10000; ++i) {\n//\t\tmPositions.push_back(std::make_pair(i, 
i));\n//\t\tmValues.push_back(i);\n//\t}\n//\n//\tmPositions.push_back(std::make_pair(42, 43));\n//\tmValues.push_back(42.0);\n//\n//\tVector vector(10000, 1.0);\n//\tvector[500] = 3.5;\n//\n//\tDenseMatrix mat(10000, 10000, mPositions, mValues);\n//\n//\tVector result = mat * vector;\n//\tASSERT_EQ(mat.numberOfRows(), result.getDimension());\n//\n//\tfor (index i = 0; i < 10000; ++i) {\n//\t\tif (i != 500 && i != 42 && i != 43) {\n//\t\t\tEXPECT_EQ(i, result[i]);\n//\t\t}\n//\t}\n//\n//\tEXPECT_EQ(42.0, mat(42, 43));\n//\tEXPECT_EQ(84.0, result[42]);\n//\tEXPECT_EQ(1750.0, result[500]);\n//\n//\n//\tstd::vector<std::pair<index, index> > positions;\n//\tpositions.push_back(std::make_pair(0,0));\n//\tpositions.push_back(std::make_pair(0,1));\n//\tpositions.push_back(std::make_pair(0,2));\n//\tpositions.push_back(std::make_pair(1,0));\n//\tpositions.push_back(std::make_pair(1,1));\n//\tpositions.push_back(std::make_pair(2,0));\n//\tpositions.push_back(std::make_pair(2,2));\n//\tpositions.push_back(std::make_pair(2,3));\n//\tpositions.push_back(std::make_pair(3,2));\n//\tpositions.push_back(std::make_pair(3,3));\n//\n//\tstd::vector<double> values = {1, 2, 3, 2, 2, 3, 3, -1, -1, 4};\n//\tDenseMatrix mat2(4, 4, positions, values);\n//\n//\tVector v({1,2,3,0});\n//\tVector res = mat2 * v;\n//\tASSERT_EQ(mat2.numberOfRows(), res.getDimension());\n//\n//\tEXPECT_EQ(14, res[0]);\n//\tEXPECT_EQ(6, res[1]);\n//\tEXPECT_EQ(12, res[2]);\n//\tEXPECT_EQ(-3, res[3]);\n//\n//\t// rectangular matrix\n//\tpositions = {std::make_pair(0,0), std::make_pair(1,2)};\n//\tvalues = {1.0, 3.0};\n//\tmat = DenseMatrix(2, 5, positions, values);\n//\n//\tv = {0,1,2,3,0};\n//\tres = mat * v;\n//\n//\tASSERT_EQ(2u, res.getDimension());\n//\tEXPECT_EQ(0, res[0]);\n//\tEXPECT_EQ(6, res[1]);\n//}\n//\n//TEST(DenseMatrixGTest, testMatrixMultiplication) {\n//\tstd::vector<std::pair<index, index> > positions;\n//\tstd::vector<double> values = {1, 2, 3, 2, 2, 3, 3, -1, -1, 4};\n//\n//\tpositions.push_back(std::make_pair(0,0));\n//\tpositions.push_back(std::make_pair(0,1));\n//\tpositions.push_back(std::make_pair(0,2));\n//\tpositions.push_back(std::make_pair(1,0));\n//\tpositions.push_back(std::make_pair(1,1));\n//\tpositions.push_back(std::make_pair(2,0));\n//\tpositions.push_back(std::make_pair(2,2));\n//\tpositions.push_back(std::make_pair(2,3));\n//\tpositions.push_back(std::make_pair(3,2));\n//\tpositions.push_back(std::make_pair(3,3));\n//\n//\t//\n//\t//\t\t\t\t 1 2 3 0\n//\t// \t\t\t\t 2 2 0 0\n//\t// mat1 = mat2 = 3 0 3 -1\n//\t//\t\t\t\t 0 0 -1 4\n//\t//\n//\tDenseMatrix mat1(4, 4, positions, values);\n//\tASSERT_EQ(4u, mat1.numberOfRows());\n//\tASSERT_EQ(4u, mat1.numberOfColumns());\n//\n//\tDenseMatrix mat2(4, 4, positions, values);\n//\tASSERT_EQ(4u, mat2.numberOfRows());\n//\tASSERT_EQ(4u, mat2.numberOfColumns());\n//\n//\t//\n//\t//\t\t\t14 6 12 -3\n//\t//\t\t\t 6 8 6 0\n//\t// result = 12 6 19 -7\n//\t//\t\t\t-3 0 -7 17\n//\t//\n//\tDenseMatrix result = mat1 * mat2;\n//\tASSERT_EQ(mat1.numberOfRows(), result.numberOfRows());\n//\tASSERT_EQ(mat1.numberOfColumns(), result.numberOfColumns());\n//\tEXPECT_EQ(14u, result.nnz());\n//\n//\tEXPECT_EQ(14, result(0,0));\n//\tEXPECT_EQ(6, result(0,1));\n//\tEXPECT_EQ(12, result(0,2));\n//\tEXPECT_EQ(-3, result(0,3));\n//\tEXPECT_EQ(6, result(1,0));\n//\tEXPECT_EQ(8, result(1,1));\n//\tEXPECT_EQ(6, result(1,2));\n//\tEXPECT_EQ(0, result(1,3));\n//\tEXPECT_EQ(12, result(2,0));\n//\tEXPECT_EQ(6, result(2,1));\n//\tEXPECT_EQ(19, result(2,2));\n//\tEXPECT_EQ(-7, 
result(2,3));\n//\tEXPECT_EQ(-3, result(3,0));\n//\tEXPECT_EQ(0, result(3,1));\n//\tEXPECT_EQ(-7, result(3,2));\n//\tEXPECT_EQ(17, result(3,3));\n//\n//\n//\t// rectangular matrices\n//\tpositions = {std::make_pair(0,0), std::make_pair(0,3), std::make_pair(1,2), std::make_pair(2,1), std::make_pair(2,3)};\n//\tvalues = {1.0, 2.0, 1.0, 2.0, 4.0};\n//\tmat1 = DenseMatrix(3, 4, positions, values);\n//\n//\tpositions = {std::make_pair(0,0), std::make_pair(2,1), std::make_pair(3,0), std::make_pair(3,1)};\n//\tvalues = {1.0, 0.5, 42.0, 1.0};\n//\tmat2 = DenseMatrix(4, 2, positions, values);\n//\n//\tresult = mat1 * mat2;\n//\n//\tEXPECT_EQ(85, result(0,0));\n//\tEXPECT_EQ(2, result(0,1));\n//\tEXPECT_EQ(0, result(1,0));\n//\tEXPECT_EQ(0.5, result(1,1));\n//\tEXPECT_EQ(168, result(2,0));\n//\tEXPECT_EQ(4, result(2,1));\n//}\n//\n//TEST(DenseMatrixGTest, testBigMatrixMultiplication) {\n//\tMETISGraphReader graphReader;\n//\tGraph G = graphReader.read(\"input/PGPgiantcompo.graph\");\n//\n//\tstd::vector<std::pair<index,index>> positions;\n//\tstd::vector<double> values;\n//\n//\tG.forEdges([&](index i, index j, double value) {\n//\t\tpositions.push_back(std::make_pair(i,j));\n//\t\tvalues.push_back(value);\n//\t});\n//\n//\tDenseMatrix mat(G.upperNodeIdBound(), G.upperNodeIdBound(), positions, values);\n//\n//\tDenseMatrix result = mat * mat;\n//\tASSERT_EQ(mat.numberOfRows(), result.numberOfRows());\n//\tASSERT_EQ(mat.numberOfColumns(), result.numberOfColumns());\n//}\n//\n//TEST(DenseMatrixGTest, testTransposition) {\n//\t//\n//\t//\t 1 2 3 1 1\n//\t// \t 0 2 0 0 0\n//\t// mat 4 0 3 -1 0\n//\t//\t 0 0 0 4 -1\n//\t//\n//\tstd::vector<std::pair<index, index> > positions;\n//\tstd::vector<double> values = {1, 2, 3, 1, 1, 2, 4, 3, -1, 4, -1};\n//\n//\tpositions.push_back(std::make_pair(0,0));\n//\tpositions.push_back(std::make_pair(0,1));\n//\tpositions.push_back(std::make_pair(0,2));\n//\tpositions.push_back(std::make_pair(0,3));\n//\tpositions.push_back(std::make_pair(0,4));\n//\tpositions.push_back(std::make_pair(1,1));\n//\tpositions.push_back(std::make_pair(2,0));\n//\tpositions.push_back(std::make_pair(2,2));\n//\tpositions.push_back(std::make_pair(2,3));\n//\tpositions.push_back(std::make_pair(3,3));\n//\tpositions.push_back(std::make_pair(3,4));\n//\n//\tDenseMatrix mat(4, 5, positions, values);\n//\tDenseMatrix matT = mat.transpose();\n//\n//\tEXPECT_EQ(5u, matT.numberOfRows());\n//\tEXPECT_EQ(4u, matT.numberOfColumns());\n//\n//\tmat.forNonZeroElementsInRowOrder([&](index i, index j, double value) {\n//\t\tEXPECT_EQ(value, matT(j,i));\n//\t});\n//}\n//\n//TEST(DenseMatrixGTest, testMatrixTransposeMatrixMultiplication) {\n//\tstd::vector<std::pair<index,index>> positions = {std::make_pair(0,0), std::make_pair(0,1), std::make_pair(0,2), std::make_pair(1,1), std::make_pair(2,0), std::make_pair(3,2)};\n//\tstd::vector<double> values = {1.0, 2.0, 3.0, 2.0, 3.0, -1.0};\n//\tDenseMatrix A(4, 3, positions, values);\n//\n//\tpositions = {std::make_pair(0,0), std::make_pair(1,0), std::make_pair(2,1), std::make_pair(3,1), std::make_pair(3,2)};\n//\tvalues = {1.0, 3.0, -2.0, 5.0, -8.0};\n//\tDenseMatrix B(4, 3, positions, values);\n//\n//\tDenseMatrix C = DenseMatrix::mTmMultiply(A, B);\n//\n//\tEXPECT_EQ(1, C(0,0));\n//\tEXPECT_EQ(-6, C(0,1));\n//\tEXPECT_EQ(0, C(0,2));\n//\tEXPECT_EQ(8, C(1,0));\n//\tEXPECT_EQ(0, C(1,1));\n//\tEXPECT_EQ(0, C(1,2));\n//\tEXPECT_EQ(3, C(2,0));\n//\tEXPECT_EQ(-5, C(2,1));\n//\tEXPECT_EQ(8, C(2,2));\n//}\n//\n//TEST(DenseMatrixGTest, 
testMatrixMatrixTransposeMultiplication) {\n//\tstd::vector<std::pair<index,index>> positions = {std::make_pair(0,0), std::make_pair(0,1), std::make_pair(0,2), std::make_pair(1,1), std::make_pair(2,0), std::make_pair(3,2)};\n//\tstd::vector<double> values = {1.0, 2.0, 3.0, 2.0, 3.0, -1.0};\n//\tDenseMatrix A(4, 3, positions, values);\n//\n//\tpositions = {std::make_pair(0,0), std::make_pair(1,0), std::make_pair(2,1), std::make_pair(3,1), std::make_pair(3,2)};\n//\tvalues = {1.0, 3.0, -2.0, 5.0, -8.0};\n//\tDenseMatrix B(4, 3, positions, values);\n//\n//\tDenseMatrix C = DenseMatrix::mmTMultiply(A, B);\n//\n//\tEXPECT_EQ(1, C(0,0));\n//\tEXPECT_EQ(3, C(0,1));\n//\tEXPECT_EQ(-4, C(0,2));\n//\tEXPECT_EQ(-14, C(0,3));\n//\tEXPECT_EQ(-4, C(1,2));\n//\tEXPECT_EQ(10, C(1,3));\n//\tEXPECT_EQ(3, C(2,0));\n//\tEXPECT_EQ(9, C(2,1));\n//\tEXPECT_EQ(8, C(3,3));\n//\tEXPECT_EQ(0, C(1,0));\n//\tEXPECT_EQ(0, C(1,1));\n//\tEXPECT_EQ(0, C(2,3));\n//}\n//\n//TEST(DenseMatrixGTest, testMatrixTransposeVectorMultiplication) {\n//\tstd::vector<std::pair<index,index>> positions = {std::make_pair(0,0), std::make_pair(2,1)};\n//\tstd::vector<double> values = {1.0, 3.0};\n//\tDenseMatrix mat(5, 2, positions, values);\n//\n//\tVector v = {0,1,2,3,0};\n//\tVector res = DenseMatrix::mTvMultiply(mat, v);\n//\n//\tASSERT_EQ(2u, res.getDimension());\n//\tEXPECT_EQ(0, res[0]);\n//\tEXPECT_EQ(6, res[1]);\n//}\n//\n//TEST(DenseMatrixGTest, testMatrixDiagonal) {\n//\t//\n//\t//\t 1 2 3 1 1\n//\t// \t 0 2 0 0 0\n//\t// mat 4 0 0 -1 0\n//\t//\t 0 0 0 4 -1\n//\t//\n//\tstd::vector<std::pair<index, index> > positions;\n//\tstd::vector<double> values = {1, 2, 3, 1, 1, 2, 4, -1, 4, -1};\n//\n//\tpositions.push_back(std::make_pair(0,0));\n//\tpositions.push_back(std::make_pair(0,1));\n//\tpositions.push_back(std::make_pair(0,2));\n//\tpositions.push_back(std::make_pair(0,3));\n//\tpositions.push_back(std::make_pair(0,4));\n//\tpositions.push_back(std::make_pair(1,1));\n//\tpositions.push_back(std::make_pair(2,0));\n//\tpositions.push_back(std::make_pair(2,3));\n//\tpositions.push_back(std::make_pair(3,3));\n//\tpositions.push_back(std::make_pair(3,4));\n//\n//\tDenseMatrix mat(4, 5, positions, values);\n//\n//\tVector diag1 = mat.diagonal();\n//\tEXPECT_EQ(1, diag1[0]);\n//\tEXPECT_EQ(2, diag1[1]);\n//\tEXPECT_EQ(0, diag1[2]);\n//\tEXPECT_EQ(4, diag1[3]);\n//\n//\tmat.sort();\n//\tVector diag2 = mat.diagonal();\n//\tfor (index i = 0; i < diag2.getDimension(); ++i) {\n//\t\tEXPECT_EQ(diag1[i], diag2[i]);\n//\t}\n//}\n\nTEST(DenseMatrixGTest, testLUDecomposition) {\n\t// \t\t 1 2 4\n\t// mat1 = 3 8 14\n\t// \t\t 2 6 13\n\n\n\tstd::vector<double> values = {1,2,4,3,8,14,2,6,13};\n\tDenseMatrix mat(3, 3, values);\n\tDenseMatrix::LUDecomposition(mat);\n\n\tEXPECT_EQ(1, mat(0,0));\n\tEXPECT_EQ(2, mat(0,1));\n\tEXPECT_EQ(4, mat(0,2));\n\tEXPECT_EQ(3, mat(1,0));\n\tEXPECT_EQ(2, mat(1,1));\n\tEXPECT_EQ(2, mat(1,2));\n\tEXPECT_EQ(2, mat(2,0));\n\tEXPECT_EQ(1, mat(2,1));\n\tEXPECT_EQ(3, mat(2,2));\n\n\tVector expected = {3, 4, -2};\n\tVector b = {3, 13, 4};\n\tVector luResult = DenseMatrix::LUSolve(mat, b);\n\n\tfor (index i = 0; i < expected.getDimension(); ++i) {\n\t\tEXPECT_EQ(expected[i], luResult[i]);\n\t}\n}\n\n} /* namespace NetworKit */\n" }, { "alpha_fraction": 0.6544944047927856, "alphanum_fraction": 0.6713483333587646, "avg_line_length": 11.714285850524902, "blob_id": "73e5c25acc54e667db4418032a49f599e9301cbc", "content_id": "d691255b0de9160e27f6c287ff62eda7bdb8239e", "detected_licenses": [ "MIT" ], "is_generated": false, 
"is_vendor": false, "language": "C++", "length_bytes": 356, "license_type": "permissive", "max_line_length": 42, "num_lines": 28, "path": "/networkit/cpp/matching/test/MatcherGTest.h", "repo_name": "networkitproject/networkit-mirror", "src_encoding": "UTF-8", "text": "/*\n * MatcherGTest.h\n *\n * Created on: Jun 14, 2013\n * Author: Henning\n */\n\n#ifndef NOGTEST\n\n#ifndef MATCHERGTEST_H_\n#define MATCHERGTEST_H_\n\n#include <gtest/gtest.h>\n\n\nnamespace NetworKit {\n\nclass MatcherGTest: public testing::Test {\npublic:\n\tMatcherGTest() = default;\n\tvirtual ~MatcherGTest() = default;\n};\n\n}\n\n#endif /* MATCHERGTEST_H_ */\n\n#endif\n" }, { "alpha_fraction": 0.5823737978935242, "alphanum_fraction": 0.6000885963439941, "avg_line_length": 22.278350830078125, "blob_id": "fc06ac65010e108ee98f93b126a5c941cdf74320", "content_id": "0f7e368fc30a7d5b635ea09e3fe1f71b59b3660f", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 2258, "license_type": "permissive", "max_line_length": 134, "num_lines": 97, "path": "/networkit/cpp/generators/RmatGenerator.cpp", "repo_name": "networkitproject/networkit-mirror", "src_encoding": "UTF-8", "text": "/*\n * RmatGenerator.cpp\n *\n * Created on: 18.03.2014\n * Author: Henning, cls\n */\n\n#include \"RmatGenerator.h\"\n#include \"../auxiliary/Random.h\"\n#include \"../auxiliary/NumericTools.h\"\n#include \"../auxiliary/Log.h\"\n\nnamespace NetworKit {\n\nRmatGenerator::RmatGenerator(count scale, count edgeFactor, double a, double b, double c, double d, bool weighted, count reduceNodes):\n\tscale(scale), edgeFactor(edgeFactor), a(a), b(b), c(c), d(d), weighted(weighted), reduceNodes(reduceNodes)\n{\n if (scale > 63) throw std::runtime_error(\"Cannot generate more than 2^63 nodes\");\n\tdouble sum = a+b+c+d;\n\tINFO(\"sum of probabilities: \", sum);\n\tif (!Aux::NumericTools::equal(sum, 1.0, 0.0001)) throw std::runtime_error(\"Probabilities in Rmat have to sum to 1.\");\n\tdefaultEdgeWeight = 1.0;\n}\n\nGraph RmatGenerator::generate() {\n\tcount n = (1 << scale);\n\tcount numEdges = n * edgeFactor;\n\tGraph G(n, true);\n\tdouble ab = a+b;\n\tdouble abc = ab+c;\n\n\tauto quadrant([&]() {\n\t\tdouble r = Aux::Random::probability();\n\t\tTRACE(\"r: \", r);\n\n\t\tif (r <= a) {\n\t\t\treturn 0;\n\t\t}\n\t\telse if (r <= ab) {\n\t\t\treturn 1;\n\t\t}\n\t\telse if (r <= abc) {\n\t\t\treturn 2;\n\t\t}\n\t\telse return 3;\n\t});\n\n\tauto drawEdge([&]() {\n\t\tnode u = 0;\n\t\tnode v = 0;\n\t\tfor (index i = 0; i < scale; ++i) {\n\t\t\tcount q = quadrant();\n//\t\t\tTRACE(\"q: \", q);\n\t\t\tu = u << 1;\n\t\t\tv = v << 1;\n\t\t\tu = u | (q >> 1);\n\t\t\tv = v | (q & 1);\n\t\t}\n\n\t\treturn std::make_pair(u, v);\n\t});\n\n\tfor (index e = 0; e < numEdges; ++e) {\n\t\tstd::pair<node, node> drawnEdge = drawEdge();\n//\t\tTRACE(\"edge drawn: \", drawnEdge.first, \" - \", drawnEdge.second);\n\t\tG.increaseWeight(drawnEdge.first, drawnEdge.second, defaultEdgeWeight);\n\t}\n\n\t// delete random nodes to achieve node count\n\tINFO(\"deleting random nodes: \", reduceNodes);\n\tfor (count i = 0; i < reduceNodes; ++i) {\n\t\tnode u = G.randomNode();\n\t\tstd::vector<std::pair<node, node>> incidentEdges;\n\t\tG.forEdgesOf(u, [&](node u, node v) {\n\t\t\tincidentEdges.push_back({u,v});\n\t\t});\n\t\tfor (auto edge : incidentEdges) {\n\t\t\tnode x, y;\n\t\t\tstd::tie(x, y) = edge;\n\t\t\tG.removeEdge(x, y);\n\t\t}\n\t\tassert (G.degree(u) == 0);\n\t\tG.removeNode(u);\n\t}\n\n\tif (!weighted) {\n\t\t// set unit 
weights\n\t\tG.forEdges([&](node u, node v) {\n\t\t\tG.setWeight(u, v, 1.0);\n\t\t});\n\t}\n\n\tG.shrinkToFit();\n\treturn G;\n}\n\n} /* namespace NetworKit */\n" }, { "alpha_fraction": 0.7043429613113403, "alphanum_fraction": 0.7288418412208557, "avg_line_length": 50.31428527832031, "blob_id": "ef586ad3192c94c3a944673b493ac40ea39ab42a", "content_id": "5ef4da87123aa63b6b96a8a424275392fe7a4b1b", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 1797, "license_type": "permissive", "max_line_length": 169, "num_lines": 35, "path": "/Doc/doc/student_theses.rst", "repo_name": "networkitproject/networkit-mirror", "src_encoding": "UTF-8", "text": ".. |br| raw:: html\n\n <br />\n\n==============\nStudent Theses\n==============\n\nThis is a list of student theses based on our NetworKit tool suite.\n\n* Michael Wegner: Graphenzeichnen und Distanzgeometrien. Masterarbeit, ongoing. |br| Supervisors: H. Meyerhenke, E. Bergamini \n\n* Maximilian Vogel: Advancing Algorithms and Methodology for Exploratory Network Analysis. Masterarbeit, ongoing. |br| Supervisors: C.L. Staudt, G. Schädler (Fak. für Physik), H. Meyerhenke\n\n* Dominik Kiefer: Fast Maximization of Betweenness and Closeness of a Node. Bachelorarbeit, April 2016. |br| Supervisors: E. Bergamini, H. Meyerhenke.\n\n* Peter Eisenmann: Berechnung kompatibler Pfade in DAGs. Bachelorarbeit, November 2015. |br| Supervisors: R. Glantz, H. Meyerhenke\n\n* Mark Erb: Automatisierte Analyse komplexer Netzwerke. Studienarbeit, October 2015. |br| Supervisors: C. Staudt, H. Meyerhenke.\n\n* Dennis Felsing: Parallele Graphenalgorithmen auf Intel Xeon Phi. Masterarbeit, August 2015. |br| Supervisors: M. v. Looz, H. Meyerhenke\n\n* Patrick Bisenius: Partitioning and Reparatitioning using size-constrained label propagation and NetworKit . Bachelorarbeit, May 2015. |br| Supervisors: R. Glantz, H. Meyerhenke\n\n* Kolja Esders: Link Prediction in Complex Networks. Bachelorarbeit, May 2015. |br| Supervisors: E. Bergamini, C. Staudt, H. Meyerhenke.\n\n* Daniel Hoske: A Fast Combinatorial Solver for Laplacian Matrices. Masterarbeit, December 2014. |br| Supervisors: H. Meyerhenke, D. Lukarski\n\n* Marc Nemes: Extending NetworKit. Bachelorarbeit, December 2014. |br| Supervisors: C. Staudt, H. Meyerhenke\n\n* Gerd Lindner: Complex Network Backbones. Bachelorarbeit, September 2014. |br| Supervisors: C. Staudt, M. Hamann, H. Meyerhenke\n\n* Jannis Koch: Network Analysis on Distributed Systems. Bachelorarbeit, October 2014. |br| Supervisors: C. Staudt, H. Meyerhenke\n\n* Yassine Marrakchi: Selective Community Detection in Complex Networks. Bachelorarbeit, October 2013. |br| Supervisors: C. Staudt, H. 
Meyerhenke.\n" }, { "alpha_fraction": 0.5051229596138, "alphanum_fraction": 0.5717213153839111, "avg_line_length": 18.520000457763672, "blob_id": "83fcd3561752ae05030512191725641ec50f43b6", "content_id": "dc8ce3d0a876045f30ec708c9db99dd57f421816", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 976, "license_type": "permissive", "max_line_length": 143, "num_lines": 50, "path": "/networkit/cpp/graph/test/APSPGTest.cpp", "repo_name": "networkitproject/networkit-mirror", "src_encoding": "UTF-8", "text": "/*\n * APSPGTest.cpp\n *\n * Created on: 07.07.2015\n * Author: Arie Slobbe, Elisabetta Bergamini\n */\n\n#ifndef NOGTEST\n\n#include \"APSPGTest.h\"\n#include \"../APSP.h\"\n#include <string>\n\n\nnamespace NetworKit {\n\nTEST_F(APSPGTest, testAPSP) {\n/* Graph:\n ______\n\t\t/ \\\n\t 0 3 6\n\t\t\\ / \\ /\n\t\t 2 5\n\t\t/ \\ / \\\n\t 1 4 7\n*/\n\tint n = 8;\n\tGraph G(n);\n\n\tG.addEdge(0, 2);\n\tG.addEdge(1, 2);\n\tG.addEdge(2, 3);\n\tG.addEdge(2, 4);\n\tG.addEdge(3, 5);\n\tG.addEdge(4, 5);\n\tG.addEdge(5, 6);\n\tG.addEdge(5, 7);\n\tG.addEdge(0, 6);\n\n\tAPSP apsp(G);\n\tapsp.run();\n\tstd::vector<std::vector<edgeweight> > distances = apsp.getDistances();\n\tINFO(\"distances[0]: \", distances[0][0], distances[0][1], distances[0][2], distances[0][3], distances[0][4], distances[0][5], distances[0][6]);\n\tINFO(\"distances[1]: \", distances[1][0], distances[1][1], distances[1][2], distances[1][3], distances[1][4], distances[1][5], distances[1][6]);\n\tEXPECT_TRUE(apsp.isParallel());\n}\n\n} /* namespace NetworKit */\n\n#endif /*NOGTEST */\n" }, { "alpha_fraction": 0.6592348217964172, "alphanum_fraction": 0.6799602508544922, "avg_line_length": 25.354482650756836, "blob_id": "68350e083486796d86824e42cc39eab18e490701", "content_id": "34b6436f3e4fa427fe7348f26c7590b5c6365ede", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 19107, "license_type": "permissive", "max_line_length": 131, "num_lines": 725, "path": "/networkit/cpp/io/test/IOGTest.cpp", "repo_name": "networkitproject/networkit-mirror", "src_encoding": "UTF-8", "text": "/*\n * IOGTest.cpp\n *\n * Created on: 12.12.2012\n * Author: Christian Staudt ([email protected])\n */\n\n#ifndef NOGTEST\n\n#include \"IOGTest.h\"\n\n#include <fstream>\n#include <unordered_set>\n#include <vector>\n\n#include \"../METISGraphReader.h\"\n#include \"../METISGraphWriter.h\"\n#include \"../PartitionWriter.h\"\n#include \"../PartitionReader.h\"\n#include \"../GraphIO.h\"\n#include \"../DotGraphWriter.h\"\n#include \"../DGSReader.h\"\n#include \"../EdgeListWriter.h\"\n#include \"../EdgeListPartitionReader.h\"\n#include \"../SNAPEdgeListPartitionReader.h\"\n#include \"../SNAPGraphWriter.h\"\n#include \"../EdgeListReader.h\"\n#include \"../GMLGraphWriter.h\"\n#include \"../EdgeListCoverReader.h\"\n#include \"../CoverReader.h\"\n#include \"../CoverWriter.h\"\n#include \"../GMLGraphReader.h\"\n#include \"../GraphToolBinaryReader.h\"\n#include \"../GraphToolBinaryWriter.h\"\n#include \"../../generators/ErdosRenyiGenerator.h\"\n\n#include \"../../community/GraphClusteringTools.h\"\n#include \"../../auxiliary/Log.h\"\n#include \"../../community/ClusteringGenerator.h\"\n#include \"../../structures/Partition.h\"\n#include \"../../community/Modularity.h\"\n#include \"../../community/PLP.h\"\n\n\nnamespace NetworKit {\n\nTEST_F(IOGTest, testGraphIOEdgeList) {\n\tErdosRenyiGenerator graphGen(100, 0.1);\n\tGraph G = graphGen.generate();\n\n\tGraphIO 
graphio;\n\tstd::string path = \"output/edgelist.txt\";\n\tgraphio.writeEdgeList(G, path);\n\n\tbool exists = false;\n\tstd::ifstream file(path);\n\tif (file) {\n\t\texists = true;\n\t}\n\tEXPECT_TRUE(exists) << \"A file should have been created : \" << path;\n}\n\nTEST_F(IOGTest, testGraphIOAdjacencyList) {\n\tErdosRenyiGenerator graphGen(100, 0.1);\n\tGraph G = graphGen.generate();\n\tGraphIO graphio;\n\tstd::string path = \"output/circular.adjlist\";\n\tgraphio.writeAdjacencyList(G, path);\n\n\tbool exists = false;\n\tstd::ifstream file(path);\n\tif (file) {\n\t\texists = true;\n\t}\n\tEXPECT_TRUE(exists) << \"A file should have been created : \" << path;\n}\n\n\nTEST_F(IOGTest, testGraphIOForIsolatedNodes) {\n\tGraph G(20);\n\tGraphIO graphio;\n\tstd::string path = \"output/isolated.adjlist\";\n\tgraphio.writeAdjacencyList(G, path);\n\n\tbool exists = false;\n\tstd::ifstream file(path);\n\tif (file) {\n\t\texists = true;\n\t}\n\tEXPECT_TRUE(exists) << \"A file should have been created : \" << path;\n}\n\n\n\nTEST_F(IOGTest, testMETISGraphReader) {\n\tstd::string path = \"input/jazz.graph\";\n\n\tMETISGraphReader reader;\n\tGraph G = reader.read(path);\n\tcount n = 198;\n\tcount m = 2742;\n\tEXPECT_FALSE(G.isEmpty());\n\tEXPECT_EQ(n, G.numberOfNodes()) << \"There are \" << n << \" nodes in the graph\";\n\tEXPECT_EQ(m, G.numberOfEdges()) << \"There are \" << m << \" edges in the graph\";\n\n\tfor (index v = 0; v < n; ++v) {\n\t\tEXPECT_TRUE(G.hasNode(v)) << \"Node \" << v << \" should be there\";\n\t}\n\n\n\t// graph polblogs (has singletons)\n\tpath = \"input/polblogs.graph\";\n\tG = reader.read(path);\n\tn = 1490;\n\tm = 16715;\n\tEXPECT_FALSE(G.isEmpty());\n\tEXPECT_EQ(n, G.numberOfNodes()) << \"There are \" << n << \" nodes in the graph\";\n\tEXPECT_EQ(m, G.numberOfEdges()) << \"There are \" << m << \" edges in the graph\";\n\n\tfor (index v = 0; v < n; ++v) {\n\t\tEXPECT_TRUE(G.hasNode(v)) << \"Node \" << v << \" should be there\";\n\t}\n\n\n\t// graph PGPgiantcompo\n\tpath = \"input/PGPgiantcompo.graph\";\n\tG = reader.read(path);\n\tn = 10680;\n\tm = 24316;\n\tEXPECT_FALSE(G.isEmpty());\n\tEXPECT_EQ(n, G.numberOfNodes()) << \"There are \" << n << \" nodes in the graph\";\n\tEXPECT_EQ(m, G.numberOfEdges()) << \"There are \" << m << \" edges in the graph\";\n\n\tfor (index v = 0; v < n; ++v) {\n\t\tEXPECT_TRUE(G.hasNode(v)) << \"Node \" << v << \" should be there\";\n\t}\n}\n\nTEST_F(IOGTest, testMETISGraphReaderWithTinyGraphs) {\n\t/* These graphs are from the METIS documentation and cover different settings\n\t\tof the fmt flag in the header of a METIS graph file */\n\tcount n = 7;\n\tcount m = 11;\n\tMETISGraphReader reader;\n\n\tstd::string path = \"input/tiny_01.graph\";\n\tGraph G = reader.read(path);\n\tEXPECT_FALSE(G.isEmpty());\n\tEXPECT_EQ(n, G.numberOfNodes()) << \"There are \" << n << \" nodes in the graph\";\n\tEXPECT_EQ(m, G.numberOfEdges()) << \"There are \" << m << \" edges in the graph\";\n\n\tfor (index v = 0; v < n; ++v) {\n\t\tEXPECT_TRUE(G.hasNode(v)) << \"Node \" << v << \" should be there\";\n\t}\n\n\tpath = \"input/tiny_02.graph\";\n\tG = reader.read(path);\n\tEXPECT_FALSE(G.isEmpty());\n\tEXPECT_EQ(n, G.numberOfNodes()) << \"There are \" << n << \" nodes in the graph\";\n\tEXPECT_EQ(m, G.numberOfEdges()) << \"There are \" << m << \" edges in the graph\";\n\n\tfor (index v = 0; v < n; ++v) {\n\t\tEXPECT_TRUE(G.hasNode(v)) << \"Node \" << v << \" should be there\";\n\t}\n\n\tpath = \"input/tiny_03.graph\";\n\tG = 
reader.read(path);\n\tEXPECT_FALSE(G.isEmpty());\n\tEXPECT_EQ(n, G.numberOfNodes()) << \"There are \" << n << \" nodes in the graph\";\n\tEXPECT_EQ(m, G.numberOfEdges()) << \"There are \" << m << \" edges in the graph\";\n\n\tfor (index v = 0; v < n; ++v) {\n\t\tEXPECT_TRUE(G.hasNode(v)) << \"Node \" << v << \" should be there\";\n\t}\n\n\tpath = \"input/tiny_04.graph\";\n\tG = reader.read(path);\n\tEXPECT_FALSE(G.isEmpty());\n\tEXPECT_EQ(n, G.numberOfNodes()) << \"There are \" << n << \" nodes in the graph\";\n\tEXPECT_EQ(m, G.numberOfEdges()) << \"There are \" << m << \" edges in the graph\";\n\n\tfor (index v = 0; v < n; ++v) {\n\t\tEXPECT_TRUE(G.hasNode(v)) << \"Node \" << v << \" should be there\";\n\t}\n\n}\n/*\nTEST_F(IOGTest, testMETISGraphReaderWithDoubleWeights) {\n\tstd::string path = \"input/jazz2double.graph\";\n\n\tFastMETISGraphReader reader;\n\tGraph G = reader.read(path);\n\n\tEXPECT_FALSE(G.isEmpty());\n\tcount n = 5;\n\tcount m = 6;\n\tEXPECT_EQ(n, G.numberOfNodes()) << \"There are \" << n << \" nodes in the graph\";\n\tEXPECT_EQ(m, G.numberOfEdges()) << \"There are \" << m << \" edges in the graph\";\n\n\tfor (index v = 0; v < n; ++v) {\n\t\tEXPECT_TRUE(G.hasNode(v)) << \"Node \" << v << \" should be there\";\n\t}\n\tdouble edgeweight = 7.71099;\n\tdouble abs = 1e-9;\n\tEXPECT_LE(G.totalEdgeWeight()-edgeweight,abs) << \"Total edgeweight should be \" << edgeweight;\n}\n*/\n/* Old and therefore not actually needed */\n/*TEST_F(IOGTest, testMETISGraphReaderWithWeights) {\n\tstd::string path = \"input/lesmis.graph\";\n\n\tMETISGraphReader reader;\n\tGraph G = reader.read(path);\n\n\tEXPECT_FALSE(G.isEmpty());\n\tcount n = 77;\n\tcount m = 254;\n\tEXPECT_EQ(n, G.numberOfNodes()) << \"There are \" << n << \" nodes in the graph\";\n\tEXPECT_EQ(m, G.numberOfEdges()) << \"There are \" << m << \" edges in the graph\";\n\n\tfor (index v = 0; v < n; ++v) {\n\t\tEXPECT_TRUE(G.hasNode(v)) << \"Node \" << v << \" should be there\";\n\t}\n}*/\n\nTEST_F(IOGTest, testMETISGraphWriter) {\n\tstd::string path = \"output/jazz1.graph\";\n\tGraph G = Graph(3);\n\tG.addEdge(0,2);\n\tG.addEdge(1,1);\n\tG.addEdge(1,2);\n\tG.addEdge(2,2);\n\n\tMETISGraphWriter writer;\n\twriter.write(G, false, path);\n\tbool exists = false;\n\tstd::ifstream file(path);\n\tif (file) {\n\t\texists = true;\n\t}\n\tEXPECT_TRUE(exists) << \"A file should have been created : \" << path;\n\n}\n\n\nTEST_F(IOGTest, testMETISGraphWriterWithWeights) {\n\tstd::string path = \"output/jazz2.graph\";\n\tGraph G = Graph(5);\n\tG.addEdge(0,2);\n\tG.addEdge(0,1);\n\tG.addEdge(0,0);\n\tG.addEdge(1,1);\n\n\tMETISGraphWriter writer;\n\twriter.write(G, true, path);\n bool exists = false;\n\tstd::ifstream file(path);\n\tif (file) {\n\t\texists = true;\n\t}\n\tEXPECT_TRUE(exists) << \"A file should have been created : \" << path;\n\n}\n\nTEST_F(IOGTest, testPartitionWriterAndReader) {\n\t// write clustering first\n\tstd::string path = \"output/example.clust\";\n\n\tcount n = 100;\n\tcount k = 3;\n\tErdosRenyiGenerator graphGen(n, 0.1);\n\tGraph G = graphGen.generate();\n\n\tClusteringGenerator clusteringGen;\n\tPartition zeta = clusteringGen.makeRandomClustering(G, k);\n\n\tPartitionWriter writer;\n\twriter.write(zeta, path);\n\n\t// check if file exists\n\tbool exists = false;\n\tstd::ifstream file(path);\n\tif (file) {\n\t\texists = true;\n\t}\n\tEXPECT_TRUE(exists) << \"clustering file should have been written to: \" << path;\n\n\n\tPartitionReader reader;\n\tPartition read = reader.read(path);\n\n\tEXPECT_EQ(n, 
read.numberOfElements()) << \"read clustering should contain n nodes\";\n\tEXPECT_TRUE(GraphClusteringTools::isProperClustering(G, read)) << \"read clustering should be proper clustering of G\";\n\tEXPECT_TRUE(GraphClusteringTools::equalClusterings(read, zeta, G)) << \"read clustering should be identical to created clustering\";\n}\n\n\nTEST_F(IOGTest, testDotGraphWriter) {\n\tErdosRenyiGenerator graphGen(100, 0.1);\n\tGraph G = graphGen.generate();\n\n\tstd::string path = \"output/example.dot\";\n\n\tDotGraphWriter writer;\n\twriter.write(G, path);\n\n\t// check if file exists\n\tbool exists = false;\n\tstd::ifstream file(path);\n\tif (file) {\n\t\texists = true;\n\t}\n\tEXPECT_TRUE(exists) << \"graph file should have been written to: \" << path;\n}\n\nTEST_F(IOGTest, tryDGSReaderOnBigFile) {\n\t// read example graph\n\tDGSReader reader;\n\tGraph G;\n\tGraphEventProxy Gproxy(G);\n\treader.read(\"/Users/forigem/KIT/NetworKit-CommunityDetection/input/AuthorsGraph.dgs\", Gproxy);\n}\n\n\n\nTEST_F(IOGTest, tryDGSReader) {\n\t// read example graph\n\tDGSReader reader;\n\tGraph G;\n\tGraphEventProxy Gproxy(G);\n\treader.read(\"input/example2.dgs\", Gproxy);\n\n\t// get input parameters\n\tcount nodeCount = G.numberOfNodes();\n\tDEBUG(\"Number of nodes \" , nodeCount);\n\tEXPECT_EQ(3u, nodeCount);\n\tcount edgeCount = G.numberOfEdges();\n\tDEBUG(\"Number of edges \" , edgeCount);\n\tEXPECT_EQ(2u, edgeCount);\n\n\tG.forNodes([&](node n) {\n\t\tDEBUG(\"DEGREE OF NODE: \" , G.degree(n) , \"\\n\");\n\t});\n\n}\n\nTEST_F(IOGTest, testEdgeListReader) {\n\tEdgeListReader reader('\\t', 1);\n\n\tstd::string path = \"input/LFR-generator-example/network.dat\";\n\tDEBUG(\"reading file: \" , path);\n\tGraph G = reader.read(path);\n\tEXPECT_EQ(10u, G.numberOfNodes());\n\tEXPECT_EQ(10u, G.numberOfEdges());\n\tEXPECT_TRUE(G.hasEdge(0, 5));\n\tEXPECT_TRUE(G.hasEdge(2, 9));\n\tEXPECT_TRUE(G.hasEdge(1, 7));\n\n\tpath = \"input/example.edgelist\";\n\tDEBUG(\"reading file: \" , path);\n\tEdgeListReader reader2('\\t', 1);\n\tGraph G2 = reader2.read(path);\n\tEXPECT_EQ(10u, G2.numberOfEdges());\n\tEXPECT_TRUE(G2.hasEdge(0, 4));\n\n\tpath = \"input/spaceseparated.edgelist\";\n\tDEBUG(\"reading file: \" , path);\n\tEdgeListReader reader3(' ', 1);\n\tGraph G3 = reader3.read(path);\n\tEXPECT_EQ(10u, G3.numberOfEdges());\n\tEXPECT_TRUE(G3.hasEdge(0, 4));\n\n\tpath = \"input/spaceseparated_weighted.edgelist\";\n\tDEBUG(\"reading file: \" , path);\n\tGraph G32 = reader3.read(path);\n\tEXPECT_TRUE(G32.isWeighted());\n\tEXPECT_EQ(2,G32.weight(0,1));\n\tEXPECT_EQ(4,G32.weight(0,2));\n\tEXPECT_EQ(3,G32.weight(1,2));\n\n\tpath = \"input/comments.edgelist\";\n\tDEBUG(\"reading file: \" , path);\n\tEdgeListReader reader4('\\t', 1);\n\tGraph G4 = reader4.read(path);\n\tEXPECT_EQ(10u, G4.numberOfEdges());\n\tEXPECT_TRUE(G4.hasEdge(0, 4));\n\n}\n\nTEST_F(IOGTest, testEdgeListPartitionReader) {\n\tEdgeListPartitionReader reader(1);\n\n\tPartition zeta = reader.read(\"input/LFR-generator-example/community.dat\");\n\t//EXPECT_EQ(10, zeta.size());\n\tEXPECT_EQ(1u, zeta[0]);\n\tEXPECT_EQ(3u, zeta[1]);\n\tEXPECT_EQ(2u, zeta[2]);\n\tEXPECT_EQ(10u, zeta.numberOfElements());\n\n}\n\nTEST_F(IOGTest, testEdgeListCoverReader) {\n\tEdgeListCoverReader reader(1);\n\tEdgeListReader gReader('\\t', 1);\n\n\tGraph G = gReader.read(\"input/LFR-generator-example/network_overlapping.dat\");\n\tCover zeta = reader.read(\"input/LFR-generator-example/community_overlapping.dat\", G);\n\tEXPECT_EQ(9u, zeta.upperBound());\n\tEXPECT_EQ(10u, 
zeta.numberOfElements());\n\tEXPECT_EQ(1u, zeta[0].count(1));\n\tEXPECT_EQ(3u, zeta[0].size());\n\tEXPECT_EQ(1u, zeta[3].size());\n}\n\nTEST_F(IOGTest, testCoverReader) {\n\tCoverReader reader;\n\tEdgeListReader gReader('\\t', 1);\n\n\tGraph G = gReader.read(\"input/LFR-generator-example/network_overlapping.dat\");\n\tCover zeta = reader.read(\"input/LFR-generator-example/community_overlapping.cover\", G);\n\tEXPECT_EQ(9u, zeta.upperBound());\n\tEXPECT_EQ(10u, zeta.numberOfElements());\n\tEXPECT_EQ(1u, zeta[0].count(1));\n\tEXPECT_EQ(3u, zeta[0].size());\n\tEXPECT_EQ(1u, zeta[3].size());\n}\n\nTEST_F(IOGTest, testCoverWriter) {\n\tstd::string outpath = \"output/coverWriter_test.cover\";\n\tCoverWriter writer;\n\tCoverReader reader;\n\tEdgeListReader gReader('\\t', 1);\n\n\tGraph G = gReader.read(\"input/LFR-generator-example/network_overlapping.dat\");\n\tCover zeta = reader.read(\"input/LFR-generator-example/community_overlapping.cover\", G);\n\n\twriter.write(zeta, outpath);\n\n\tCover read = reader.read(outpath, G);\n\tEXPECT_EQ(9u, read.upperBound());\n\tEXPECT_EQ(10u, read.numberOfElements());\n\tEXPECT_EQ(1u, read[0].count(1));\n\tEXPECT_EQ(3u, read[0].size());\n\tEXPECT_EQ(1u, read[3].size());\n}\n\n\n\nTEST_F(IOGTest, testMETISGraphReaderForNodeExistence2) {\n\tMETISGraphReader reader;\n\tGraph G = reader.read(\"input/jazz.graph\");\n\tEXPECT_TRUE(G.hasNode(0));\n\tEXPECT_EQ(198u, G.numberOfNodes());\n\tEXPECT_EQ(2742u, G.numberOfEdges());\n}\n\n\nTEST_F(IOGTest, testMETISGraphReaderWithIsolatedNode) {\n\tMETISGraphReader reader;\n\tGraph G = reader.read(\"input/example.graph\");\n\tEXPECT_EQ(4u, G.numberOfNodes());\n\tEXPECT_EQ(2u, G.numberOfEdges());\n\tEXPECT_TRUE(G.hasNode(0));\n\tEXPECT_TRUE(G.hasNode(1));\n\tEXPECT_TRUE(G.hasNode(2));\n\tEXPECT_TRUE(G.hasNode(3));\n\tEXPECT_TRUE(G.hasEdge(0,1));\n\tEXPECT_TRUE(G.hasEdge(0,3));\n}\n\n\nTEST_F(IOGTest, tryReadingLFR) {\n\tstd::string graphPath;\n\tstd::string clustPath;\n\n\tstd::cout << \"[INPUT] LFR graph file path >\" << std::endl;\n\tstd::getline(std::cin, graphPath);\n\n\tstd::cout << \"[INPUT] clustering file path >\" << std::endl;\n\tstd::getline(std::cin, clustPath);\n\n\tEdgeListReader graphReader('\\t',1);\n\tEdgeListPartitionReader clusteringReader;\n\n\tGraph G = graphReader.read(graphPath);\n\tPartition truth = clusteringReader.read(clustPath);\n\n\tPLP PLP(G);\n\tPLP.run();\n\tPartition zeta = PLP.getPartition();\n\n\tModularity mod;\n\tINFO(\"static clustering quality: \" , mod.getQuality(zeta, G));\n\tINFO(\"static clustering number of clusters: \" , zeta.numberOfSubsets());\n\tINFO(\"ground truth quality: \" , mod.getQuality(truth, G));\n\tINFO(\"ground truth number of clusters: \" , truth.numberOfSubsets());\n\n}\n\n\nTEST_F(IOGTest, tryReadingSNAP) {\n\tstd::string graphPath;\n\n\tstd::cout << \"[INPUT] SNAP graph file path >\" << std::endl;\n\tstd::getline(std::cin, graphPath);\n\n\tEdgeListReader graphReader(' ', 1);\n\n\tGraph G = graphReader.read(graphPath);\n\n\tINFO(\"n = \" , G.numberOfNodes());\n\tINFO(\"m = \" , G.numberOfEdges());\n\n}\n\n\nTEST_F(IOGTest, testSNAPGraphWriter) {\n\tMETISGraphReader reader;\n\tGraph G = reader.read(\"input/jazz.graph\");\n\n\tSNAPGraphWriter writer;\n\twriter.write(G, \"output/SNAPGraphWriter.gr\");\n}\n\n\n/* obsolete as FastMETISGraphReader has been removed */\n/*\nTEST_F(IOGTest, testFastMETISGraphReader) {\n\tFastMETISGraphReader reader;\n\tGraph G = reader.read(\"input/PGPgiantcompo.graph\");\n\n\tEXPECT_EQ(10680u, G.numberOfNodes());\n\tEXPECT_EQ(24316u, 
G.numberOfEdges());\n\n\tGraph W = reader.read(\"input/lesmis.graph\");\n\n\tEXPECT_EQ(77u, W.numberOfNodes());\n\tEXPECT_EQ(254u, W.numberOfEdges());\n}*/\n\n\nTEST_F(IOGTest, tryReadingMETISFile) {\n\tstd::string graphPath;\n\tstd::cout << \"[INPUT] graph file path >\" << std::endl;\n\tstd::getline(std::cin, graphPath);\n\n\tMETISGraphReader reader;\n\tGraph G = reader.read(graphPath);\n\n\tEXPECT_TRUE(true);\n}\n\nTEST_F(IOGTest, testGMLGraphWriterUndirected) {\n\tstd::string path = \"output/jazz2_undirected.gml\";\n\tGraph G = Graph(5);\n\tG.addEdge(0,2);\n\tG.addEdge(0,1);\n\tG.addEdge(0,0);\n\tG.addEdge(1,1);\n\n\tGMLGraphWriter writer;\n\twriter.write(G,path);\n\tbool exists = false;\n\tstd::ifstream file(path);\n\tif (file) {\n\t\texists = true;\n\t}\n\tEXPECT_TRUE(exists) << \"A file should have been created : \" << path;\n\n\n}\n\nTEST_F(IOGTest, testGMLGraphWriterDirected) {\n\tstd::string path = \"output/jazz2_directed.gml\";\n\tGraph G = Graph(5,false,true);\n\tG.addEdge(0,2);\n\tG.addEdge(0,1);\n\tG.addEdge(0,0);\n\tG.addEdge(1,1);\n\n\tGMLGraphWriter writer;\n\twriter.write(G,path);\n\tbool exists = false;\n\tstd::ifstream file(path);\n\tif (file) {\n\t\texists = true;\n\t}\n\tEXPECT_TRUE(exists) << \"A file should have been created : \" << path;\n\n\n}\n\nTEST_F(IOGTest, testGMLGraphReaderUndirected) {\n\tstd::string path = \"input/jazz2_undirected.gml\";\n\tGMLGraphReader reader;\n\tGraph G = reader.read(path);\n\tEXPECT_EQ(G.numberOfNodes(),5) << \"number of nodes is not correct\";\n\tEXPECT_TRUE(G.hasEdge(0,2));\n\tEXPECT_TRUE(G.hasEdge(0,1));\n\tEXPECT_TRUE(G.hasEdge(0,0));\n\tEXPECT_TRUE(G.hasEdge(1,1));\n\tEXPECT_FALSE(G.isDirected());\n\tEXPECT_TRUE(G.hasEdge(2,0));\n\tEXPECT_TRUE(G.hasEdge(1,0));\n}\n\nTEST_F(IOGTest, testGMLGraphReaderDirected) {\n\tstd::string path = \"input/jazz2_directed.gml\";\n\tGMLGraphReader reader;\n\tGraph G = reader.read(path);\n\tEXPECT_EQ(G.numberOfNodes(),5) << \"number of nodes is not correct\";\n\tEXPECT_TRUE(G.hasEdge(0,2));\n\tEXPECT_TRUE(G.hasEdge(0,1));\n\tEXPECT_TRUE(G.hasEdge(0,0));\n\tEXPECT_TRUE(G.hasEdge(1,1));\n\tEXPECT_TRUE(G.isDirected());\n\tEXPECT_FALSE(G.hasEdge(2,0));\n\tEXPECT_FALSE(G.hasEdge(1,0));\n\n}\n\nTEST_F(IOGTest, testGraphToolBinaryReader) {\n\tstd::string path = \"input/power.gt\";\n\tGraphToolBinaryReader reader;\n\tGraph G = reader.read(path);\n\tEXPECT_EQ(4941,G.numberOfNodes());\n\tEXPECT_EQ(6594,G.numberOfEdges());\n\tEXPECT_FALSE(G.isDirected());\n}\n\nTEST_F(IOGTest, testGraphToolBinaryWriter) {\n\tGraph G(10,false,false);\n\tG.addEdge(0,1);\n\tG.addEdge(2,1);\n\tG.addEdge(2,3);\n\tG.addEdge(3,4);\n\tG.addEdge(5,4);\n\tG.addEdge(5,6);\n\tG.addEdge(7,6);\n\tG.addEdge(8,6);\n\tG.addEdge(7,8);\n\tG.addEdge(9,8);\n\tG.addEdge(9,0);\n\tGraphToolBinaryReader reader;\n\tGraphToolBinaryWriter writer;\n\tstd::string path = \"output/test.gt\";\n\twriter.write(G,path);\n\tGraph Gread = reader.read(path);\n\tEXPECT_EQ(G.numberOfNodes(),Gread.numberOfNodes());\n\tEXPECT_EQ(G.numberOfEdges(),Gread.numberOfEdges());\n\tEXPECT_EQ(G.isDirected(),Gread.isDirected());\n\tEXPECT_EQ(G.isWeighted(),Gread.isWeighted());\n}\n\nTEST_F(IOGTest, testGraphToolBinaryWriterWithDeletedNodes) {\n\tGraph G(10,false,false);\n\tG.removeNode(0);\n\tG.addEdge(2,1);\n\tG.addEdge(2,3);\n\tG.removeNode(4);\n\tG.addEdge(5,6);\n\tG.addEdge(7,6);\n\tG.addEdge(8,6);\n\tG.addEdge(7,8);\n\tG.removeNode(9);\n\tGraphToolBinaryReader reader;\n\tGraphToolBinaryWriter writer;\n\tstd::string path = 
\"output/test.gt\";\n\twriter.write(G,path);\n\tGraph Gread = reader.read(path);\n\tEXPECT_EQ(G.numberOfNodes(),Gread.numberOfNodes());\n\tEXPECT_EQ(G.numberOfEdges(),Gread.numberOfEdges());\n\tEXPECT_EQ(G.isDirected(),Gread.isDirected());\n\tEXPECT_EQ(G.isWeighted(),Gread.isWeighted());\n}\n\nTEST_F(IOGTest, testGraphToolBinaryWriterDirected) {\n\tGraph G(10,false,true);\n\tG.addEdge(0,1);\n\tG.addEdge(2,1);\n\tG.addEdge(2,3);\n\tG.addEdge(3,4);\n\tG.addEdge(5,4);\n\tG.addEdge(5,6);\n\tG.addEdge(7,6);\n\tG.addEdge(8,6);\n\tG.addEdge(7,8);\n\tG.addEdge(9,8);\n\tG.addEdge(9,0);\n\tGraphToolBinaryReader reader;\n\tGraphToolBinaryWriter writer;\n\tstd::string path = \"output/test.gt\";\n\twriter.write(G,path);\n\tGraph Gread = reader.read(path);\n\tEXPECT_EQ(G.numberOfNodes(),Gread.numberOfNodes());\n\tEXPECT_EQ(G.numberOfEdges(),Gread.numberOfEdges());\n\tEXPECT_EQ(G.isDirected(),Gread.isDirected());\n\tEXPECT_EQ(G.isWeighted(),Gread.isWeighted());\n}\n\nTEST_F(IOGTest, testGraphToolBinaryWriterWithDeletedNodesDirected) {\n\tGraph G(10,false,true);\n\tG.removeNode(0);\n\tG.addEdge(2,1);\n\tG.addEdge(2,3);\n\tG.removeNode(4);\n\tG.addEdge(5,6);\n\tG.addEdge(7,6);\n\tG.addEdge(8,6);\n\tG.addEdge(7,8);\n\tG.removeNode(9);\n\tGraphToolBinaryReader reader;\n\tGraphToolBinaryWriter writer;\n\tstd::string path = \"output/test.gt\";\n\twriter.write(G,path);\n\tGraph Gread = reader.read(path);\n\tEXPECT_EQ(G.numberOfNodes(),Gread.numberOfNodes());\n\tEXPECT_EQ(G.numberOfEdges(),Gread.numberOfEdges());\n\tEXPECT_EQ(G.isDirected(),Gread.isDirected());\n\tEXPECT_EQ(G.isWeighted(),Gread.isWeighted());\n}\n\n} /* namespace NetworKit */\n\n#endif /* NOGTEST */\n" }, { "alpha_fraction": 0.6589595079421997, "alphanum_fraction": 0.6859344840049744, "avg_line_length": 16.89655113220215, "blob_id": "f2e1d5137ebbd45a231421d8a485a4c2f3b1e09b", "content_id": "2490da74c1010cf758c52c7ded1da0360736e000", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 519, "license_type": "permissive", "max_line_length": 60, "num_lines": 29, "path": "/networkit/cpp/graph/test/GraphBuilderBenchmark.cpp", "repo_name": "networkitproject/networkit-mirror", "src_encoding": "UTF-8", "text": "/*\n * GraphBuilderBenchmark.cpp\n *\n * Created on: 04.12.2014\n * Author: Marvin Ritter ([email protected])\n */\n\n#ifndef NOGTEST\n\n#include \"GraphBuilderBenchmark.h\"\n#include \"../../io/METISGraphReader.h\"\n\nnamespace NetworKit {\n\n\nGraphBuilderBenchmark::GraphBuilderBenchmark() {\n}\n\nTEST_F(GraphBuilderBenchmark, benchmarkMETISReader) {\n\tMETISGraphReader reader;\n\tmeasureInMs([&]() {\n\t\tauto G = reader.read(\"../algoDaten/graphs/eu-2005.graph\");\n\t\treturn G.numberOfNodes();\n\t}, 20);\n}\n\n} /* namespace NetworKit */\n\n#endif /*NOGTEST */\n" }, { "alpha_fraction": 0.682741105556488, "alphanum_fraction": 0.692893385887146, "avg_line_length": 19.205127716064453, "blob_id": "934ba61ab697d3f17abbdf4395a98b105ba7ef6c", "content_id": "571f4c8329a16e6937fb249e9d4efe56335f5af1", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 788, "license_type": "permissive", "max_line_length": 192, "num_lines": 39, "path": "/networkit/cpp/numerics/LAMG/Level/EliminationStage.cpp", "repo_name": "networkitproject/networkit-mirror", "src_encoding": "UTF-8", "text": "/*\n * EliminationStage.cpp\n *\n * Created on: 09.01.2015\n * Author: Michael\n */\n\n#include \"EliminationStage.h\"\n\nnamespace NetworKit 
{\n\nEliminationStage::EliminationStage(const CSRMatrix &P, const Vector &q, const std::vector<index> &fSet, const std::vector<index> &cSet) : P(P), R(P.transpose()), q(q), fSet(fSet), cSet(cSet) {\n}\n\nconst CSRMatrix& EliminationStage::getP() const {\n\treturn P;\n}\n\nconst CSRMatrix& EliminationStage::getR() const {\n\treturn R;\n}\n\nconst Vector& EliminationStage::getQ() const {\n\treturn q;\n}\n\nconst std::vector<index>& EliminationStage::getFSet() const {\n\treturn fSet;\n}\n\nconst std::vector<index>& EliminationStage::getCSet() const {\n\treturn cSet;\n}\n\ncount EliminationStage::getN() const {\n\treturn fSet.size() + cSet.size();\n}\n\n} /* namespace NetworKit */\n" }, { "alpha_fraction": 0.6940494775772095, "alphanum_fraction": 0.7045813798904419, "avg_line_length": 30.131147384643555, "blob_id": "b8a066bc44a73e9483f60de2ea0e0dd63c211d89", "content_id": "4bafdc525781e19700d737720dd660ea2bd44df0", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1899, "license_type": "permissive", "max_line_length": 142, "num_lines": 61, "path": "/networkit/cpp/centrality/LocalClusteringCoefficient.h", "repo_name": "networkitproject/networkit-mirror", "src_encoding": "UTF-8", "text": "/*\n * LocalClusteringCoefficient.h\n *\n * Created on: 31.03.2015\n * Author: maxv\n */\n\n#ifndef LOCALCLUSTERINGCOEFFICIENT_H_\n#define LOCALCLUSTERINGCOEFFICIENT_H_\n\n#include \"Centrality.h\"\n\nnamespace NetworKit {\n\n/**\n * @ingroup centrality\n */\nclass LocalClusteringCoefficient: public NetworKit::Centrality {\npublic:\n\t/**\n\t * Constructs the LocalClusteringCoefficient class for the given Graph @a G. If the local clustering coefficient scores should be normalized,\n\t * then set @a normalized to <code>true</code>. The graph may not contain self-loops. \n\t *\n\t * There are two algorithms available. The trivial (parallel) algorithm needs only a small amount of additional memory.\n\t * The turbo mode adds a (sequential, but fast) pre-processing step using ideas from [0]. This reduces the running time\n\t * significantly for most graphs. However, the turbo mode needs O(m) additional memory. In practice this should be a bit\n\t * less than half of the memory that is needed for the graph itself. The turbo mode is particularly effective for graphs\n\t * with nodes of very high degree and a very skewed degree distribution.\n\t *\n\t * [0] Triangle Listing Algorithms: Back from the Diversion\n\t * Mark Ortmann and Ulrik Brandes *\n\t * 2014 Proceedings of the Sixteenth Workshop on Algorithm Engineering and Experiments (ALENEX). 
2014, 1-8\n\t *\n\t * @param G The graph.\n\t * @param turbo If the turbo mode shall be activated.\n\t */\n\tLocalClusteringCoefficient(const NetworKit::Graph &G, bool turbo = false);\n\n\n\n\t/**\n\t* Compute the local clustering coefficient.\n\t*\n\t*/\n\tvoid run() override;\n\n\n\t/**\n\t* Get the theoretical maximum of centrality score in the given graph.\n\t*\n\t* @return The maximum centrality score.\n\t*/\n\tvirtual double maximum() override;\nprotected:\n\tbool turbo;\n\n};\n\n} /* namespace NetworKit */\n\n#endif /* LOCALCLUSTERINGCOEFFICIENT_H_ */\n" }, { "alpha_fraction": 0.8785714507102966, "alphanum_fraction": 0.8785714507102966, "avg_line_length": 86.5, "blob_id": "6d826b248246d06aafa58cf8957f7a8b75457d68", "content_id": "c046fa804e48f3fa8cd33d25e216e43cd8a4fc98", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 700, "license_type": "permissive", "max_line_length": 548, "num_lines": 8, "path": "/networkit/generators.py", "repo_name": "networkitproject/networkit-mirror", "src_encoding": "UTF-8", "text": "\"\"\"\nThis module provides graph generators that produce synthetic networks according to various models.\n\"\"\"\n\n__author__ = \"Christian Staudt\"\n\n# extension imports\nfrom _NetworKit import BarabasiAlbertGenerator, PubWebGenerator, ErdosRenyiGenerator, ClusteredRandomGraphGenerator, DorogovtsevMendesGenerator, DynamicPubWebGenerator, DynamicPathGenerator, ChungLuGenerator, HyperbolicGenerator, DynamicHyperbolicGenerator, HavelHakimiGenerator, DynamicDorogovtsevMendesGenerator, RmatGenerator, DynamicForestFireGenerator, RegularRingLatticeGenerator, WattsStrogatzGenerator, PowerlawDegreeSequence, EdgeSwitchingMarkovChainGenerator, EdgeSwitchingMarkovChainGenerator as ConfigurationModelGenerator, LFRGenerator\n" }, { "alpha_fraction": 0.7084967494010925, "alphanum_fraction": 0.7228758335113525, "avg_line_length": 21.5, "blob_id": "2c937d6b7fb131a8735ade1320a95184a34c1bee", "content_id": "76aaaaa358772f02c2d7d9ebd0e02fac9c5c12f8", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 765, "license_type": "permissive", "max_line_length": 148, "num_lines": 34, "path": "/networkit/cpp/numerics/GaussSeidelRelaxation.h", "repo_name": "networkitproject/networkit-mirror", "src_encoding": "UTF-8", "text": "/*\n * GaussSeidelRelaxation.h\n *\n * Created on: 27.10.2014\n * Author: Michael Wegner ([email protected])\n */\n\n#ifndef GAUSSSEIDELRELAXATION_H_\n#define GAUSSSEIDELRELAXATION_H_\n\n#include \"Smoother.h\"\n\nnamespace NetworKit {\n\n/**\n * @ingroup numerics\n * Implementation of the Gauss-Seidel smoother.\n */\nclass GaussSeidelRelaxation : public Smoother {\n\nprivate:\n\tdouble tolerance;\n\npublic:\n\tGaussSeidelRelaxation(double tolerance=1e-15);\n\n\tVector relax(const CSRMatrix &A, const Vector &b, const Vector &initialGuess, const count maxIterations = std::numeric_limits<count>::max()) const;\n\tVector relax(const CSRMatrix &A, const Vector &b, const count maxIterations = std::numeric_limits<count>::max()) const;\n\n};\n\n} /* namespace NetworKit */\n\n#endif /* GAUSSSEIDELRELAXATION_H_ */\n" }, { "alpha_fraction": 0.6925040483474731, "alphanum_fraction": 0.6962172389030457, "avg_line_length": 48.52873611450195, "blob_id": "e79500e39e60a529e76321bfce281bc344931899", "content_id": "1bc0fd6cfe097bb20dfd27c20c5edc111bb345c2", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 4313, 
"license_type": "permissive", "max_line_length": 250, "num_lines": 87, "path": "/Doc/release_preparations.mdown", "repo_name": "networkitproject/networkit-mirror", "src_encoding": "UTF-8", "text": "# Release preparations\n\n## Tags\n\nA tag is nothing more than a “symbolic name” for a revision. In NetworKit tags are used to mark release versions in the `default` branch, with a `MAJOR.MINOR` version name scheme.\n\n## Releasing New Features\n\nWhen new features should be released, the `Dev` branch is merged into the `default` branch. Additional testing and cleanup is performed before that happens. The new major or minor release is then tagged with a version number.\n\n ______________________________________________________ Dev\n / ^ new feature prepare release ^ \\ < merge Dev into default\n ____/________________________________________\\______________ default\n ^ tag version\n\n\nExample:\n\n hg up Dev\n hg com -m \"ready for release X.Y\"\n hg up default\n hg merge Dev\n hg com -m \"release X.Y\"\n\n## Development related\n\n1. Optional, if not already done during ongoing development:\n * Pull new features/code from forks into the Dev branch.\n * Merge branches into the Dev branch.\n2. Make sure, that the all the Unit tests (option '-t/--tests') run properly.\n3. If not done in the branches and forks: Document changes relevant to the user in the appropriate files (markdown, PDF, IPython notebook).\n4. Make sure, that the user guide Notebook runs properly.\n5. Update version number in __init__.py and version.py.\n6. Merge Dev branch into default (release) branch. [hg update default; hg merge Dev]\n7. Set version tag in default branch. [hg tag MAJOR.MINOR]\n8. Merge default branch back into Dev branch. It's not really clear, if this necessary at all. [hg update Dev; hg merge default]\n9. Optional: Update version number to MAJOR.MINOR.Dev in the Dev branch to indicate that the the ongoing development in the Dev branch is newer than the latest release.\n\n\n## PyPI related\n1. Make sure there is an up-to-date cythonized version of `_NetworKit.pyx`. This can be achieved with `cython -3 --cplus -o networkit/_NetworKit.cpp networkit/_NetworKit.pyx `.\n2. Run `python setup.py sdist upload -r test` to create the package NetworKit and upload it to the PyPI test server.[*]\n3. Do a test installation with `[sudo] pip[3] install -i https://testpypi.python.org/pypi networkit [--upgrade]` (on multiple systems, if possible).\n4. Upload it to the real PyPI with: `python setup.py sdist upload`\n\n## Website related\n* Write a news item.\n* Update \"getting started\" page, if Readme.md has changed. \n* Update files (networkit.zip, PDF, user guide,...) and their links.\n* Update \"documentation\" page and C++ and Python documentations.\n\n## Misc\n* Write a mail including release notes to the mailing list.\n\n## .pypirc\n[*] In order for the uploads to [test-]PyPI to work easily, you need an account with the appropriate rights. 
Then, it's also recommended to create a file named `.pypirc` in your home folder with the following content:\n\n```\n [distutils]\n index-servers =\n pypi\n test\n\n [test]\n repository = https://testpypi.python.org/pypi\n username = name\n password = password\n\n [pypi]\n repository = https://pypi.python.org/pypi\n username = name\n password = password\n```\n\n\n## Website related details\n\n### Requirements:\n* Access to the NetworKit Website VM.\n* Access to the NetworKit Website CMS.\n* Doxygen, Sphinx and pandoc to create the documentation as well as markdown to PDF conversion.\n\n### Workflows:\n* Download the latest release [choose the latest release tag] via algohub, rename it to `networkit.zip`, upload it to your ITI account with `scp` and then from there to the NetworKit VM. Finally, replace the old file.\n* The same works for the documentation. Create it locally by running `./Doc/docs/make_docs.sh` and upload the ZIP as described above. Also, don't forget to unzip it in the right folder to update the documentation linked on the website.\n* To create the PDFs, you can run `./Doc/docs2.pdf` and upload them together with the UserGuide notebook as described above.\n* To update the `Getting started` page on the website it should be sufficient to paste the Readme.mdown as the CMS [GetSimple] is supposed to have markdown support. If that doesn't work, use pandoc to convert the markdown to html and paste it as raw.\n" }, { "alpha_fraction": 0.7644859552383423, "alphanum_fraction": 0.7644859552383423, "avg_line_length": 58.44444274902344, "blob_id": "ebceb81a9ce24cc4abce47e8612fa1f4f9a4e164", "content_id": "ac5ec611a393744ce210d3c624ea280b07d9ab74", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 535, "license_type": "permissive", "max_line_length": 231, "num_lines": 9, "path": "/Doc/doc/contact.rst", "repo_name": "networkitproject/networkit-mirror", "src_encoding": "UTF-8", "text": "=======\nContact\n=======\n\n.. |mailinglist| image:: resources/mailinglist.png\n\nQuestions and feedback regarding NetworKit should be addressed to the NetworKit mailing list |mailinglist|. 
We encourage all users to `register <https://lists.ira.uni-karlsruhe.de/mailman/listinfo/networkit>`_ for the mailing list.\n\nNetworKit is maintained by the `Research Group Parallel Computing <http://parco.iti.kit.edu>`_ of the Institute of Theoretical Informatics at `Karlsruhe Institute of Technology (KIT) <http://www.kit.edu/english/index.php>`_.\n" }, { "alpha_fraction": 0.5961828231811523, "alphanum_fraction": 0.6072325706481934, "avg_line_length": 20.106006622314453, "blob_id": "940832b199dd15dc6b771f38b1cbd4d4c9e363f3", "content_id": "d2effd1252aea8dcb96ba847f41952d44b853d11", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 5973, "license_type": "permissive", "max_line_length": 99, "num_lines": 283, "path": "/networkit/cpp/graph/test/GraphBenchmark.cpp", "repo_name": "networkitproject/networkit-mirror", "src_encoding": "UTF-8", "text": "/*\n * GraphBenchmark.cpp\n *\n * Created on: 01.02.2013\n * Author: Christian Staudt ([email protected])\n */\n\n#ifndef NOGTEST\n\n#include \"GraphBenchmark.h\"\n#include \"../../auxiliary/Log.h\"\n\nnamespace NetworKit {\n\nGraphBenchmark::GraphBenchmark() {\n\tthis->n = 1000;\n\tINFO(\"n = \" , this->n);\n}\n\nGraphBenchmark::~GraphBenchmark() {\n\t// TODO Auto-generated destructor stub\n}\n\n\n// TASK: benchmark edge insertions standard vs raw\n\nTEST_F(GraphBenchmark, edgeInsertions_noop_seq) {\n\tint64_t n = this->n;\n\tAux::Timer runtime;\n\n\tGraph G(n);\n\tint64_t i = 0;\n\truntime.start();\n\tG.forNodePairs([&](node u, node v) {\n\t\ti++;\n\t\t// G.insertEdge(u, v);\n\t});\n\truntime.stop();\n\n\tTRACE(\"counted i = \" , i);\n\n\tINFO(\"[DONE] edgeInsertions_noop_seq (\" , runtime.elapsed().count() , \" ms)\");\n\n}\n\nTEST_F(GraphBenchmark, edgeInsertions_noop_par) {\n\tint64_t n = this->n;\n\tAux::Timer runtime;\n\n\tGraph G(n);\n\tint64_t i = 0;\n\truntime.start();\n\tG.parallelForNodePairs([&](node u, node v) {\n\t\ti++;\n\t\t// G.insertEdge(u, v);\n\t});\n\truntime.stop();\n\n\tTRACE(\"counted i = \" , i);\n\n\tINFO(\"[DONE] edgeInsertions_noop_par (\" , runtime.elapsed().count() , \" ms)\");\n\n}\n\nTEST_F(GraphBenchmark, edgeInsertions_standard_seq) {\n\tcount n = this->n;\n\tAux::Timer runtime;\n\n\tGraph G(n);\n\truntime.start();\n\tG.forNodePairs([&](node u, node v) {\n\t\tG.addEdge(u, v);\n\t});\n\truntime.stop();\n\n\tINFO(\"[DONE] edgeInsertions_standard_seq (\" , runtime.elapsed().count() , \" ms)\");\n\tEXPECT_EQ((n * (n-1)) / 2, G.numberOfEdges());\n\n\n}\n\n//TEST_F(GraphBenchmark, edgeInsertions_standard_par) {\n//\tint64_t n = this->n;\n//\tAux::Timer runtime;\n//\n//\tGraph G(n);\n//\truntime.start();\n//\tG.parallelForNodePairs([&](node u, node v) {\n//\t\tG.insertEdge(u, v);\n//\t});\n//\truntime.stop();\n//\n//\tINFO(\"[DONE] edgeInsertions_standard_par(\" , runtime.elapsed().count() , \" ms)\");\n//\tEXPECT_EQ((n * (n-1)) / 2, G.numberOfEdges());\n//\n//}\n//\n//TEST_F(GraphBenchmark, edgeInsertions_raw_seq) {\n//\tint64_t n = this->n;\n//\tAux::Timer runtime;\n//\n//\tGraph G(n);\n//\tstinger* S = G.asSTINGER();\n//\n//\truntime.start();\n//\tfor (node u = 1; u <= n; ++u) {\n//\t\tfor (node v = u + 1; v <= n; ++v) {\n//\t\t\tstinger_insert_edge_pair(S, G.defaultEdgeType, u, v, G.defaultEdgeWeight, G.defaultTimeStamp);\n//\t\t}\n//\t}\n//\truntime.stop();\n//\n//\n//\tINFO(\"[DONE] edgeInsertions_raw_seq (\" , runtime.elapsed().count() , \" ms)\");\n//\tEXPECT_EQ((n * (n-1)) / 2, G.numberOfEdges());\n//\n//\n//}\n\n//TEST_F(GraphBenchmark, 
edgeInsertions_raw_par) {\n//\tint64_t n = this->n;\n//\tAux::Timer runtime;\n//\n//\tGraph G(n);\n//\tstinger* S = G.asSTINGER();\n//\n//\truntime.start();\n//\t#pragma omp parallel\n//\tfor (node u = 1; u <= n; ++u) {\n//\t\tfor (node v = u + 1; v <= n; ++v) {\n//\t\t\tstinger_insert_edge_pair(S, G.defaultEdgeType, u, v, G.defaultEdgeWeight, G.defaultTimeStamp);\n//\t\t}\n//\t}\n//\truntime.stop();\n//\n//\tINFO(\"[DONE] edgeInsertions_raw_par (\" , runtime.elapsed().count() , \" ms)\");\n//\tEXPECT_EQ((n * (n-1)) / 2, G.numberOfEdges());\n//\n//}\n\n\n\n\n// Task: precompute incident weights with different methods\n\n\n\nTEST_F(GraphBenchmark, weightedDegree_standard_seq) {\n\tint64_t n = this->n;\n\tGraph G(n);\n\tG.forNodePairs([&](node u, node v){\n\t\tG.addEdge(u,v);\n\t});\n\n\tAux::Timer runtime;\n\n\truntime.start();\n\tstd::vector<double> weightedDegree(n, 0.0);\n\n\tG.forNodes([&](node v) {\n\t\tweightedDegree[v] = G.weightedDegree(v);\n\t});\n\truntime.stop();\n\n\tINFO(\"[DONE] (\" , runtime.elapsed().count() , \" ms)\");\n\n\t// test correctness of result\n\tbool correct = true;\n\tG.forNodes([&](node v){\n\t\tcorrect &= (weightedDegree[v] == (n - 1));\n\t});\n\n\tEXPECT_TRUE(correct);\n}\n\n\n// TEST: use different containers\n// RESULT: NodeMap, vector and array are about equally fast\n\n\n// TEST: parallelize\n\nTEST_F(GraphBenchmark, weightedDegree_standard_par) {\n\tint64_t n = this->n;\n\tGraph G(n);\n\tG.forNodePairs([&](node u, node v){\n\t\tG.addEdge(u,v);\n\t});\n\n\tAux::Timer runtime;\n\n\truntime.start();\n\tstd::vector<double> weightedDegree(n, 0.0);\n\n\tG.parallelForNodes([&](node v) {\n\t\tweightedDegree[v] = G.weightedDegree(v);\n\t});\n\truntime.stop();\n\n\tINFO(\"[DONE] (\" , runtime.elapsed().count() , \" ms)\");\n\n\t// test correctness of result\n\tbool correct = true;\n\tG.forNodes([&](node v){\n\t\tcorrect &= (weightedDegree[v] == (n - 1));\n\t});\n\n\tEXPECT_TRUE(correct);\n}\n\n\n// RESULT: significant super-linear speedup regardless of target container\n\n//TEST_F(GraphBenchmark, weightedDegree_raw_seq) {\n//\tint64_t n = this->n;\n//\tGraphGenerator graphGen;\n//\tGraph G = graphGen.makeCompleteGraph(n);\n//\tstinger* S = G.asSTINGER();\n//\n//\tAux::Timer runtime;\n//\n//\truntime.start();\n//\tNodeMap<double> weightedDegree(n, 0.0);\n//\n//\tfor (node v = 1; v <= n; ++v) {\n//\t\tdouble iw = 0.0;\n//\t\tSTINGER_READ_ONLY_FORALL_EDGES_OF_VTX_BEGIN(S, v) {\n//\t\t\tiw += stinger_edgeweight(S, STINGER_EDGE_SOURCE, STINGER_EDGE_DEST, G.defaultEdgeType);\n//\t\t} STINGER_READ_ONLY_FORALL_EDGES_OF_VTX_END();\n//\t\tweightedDegree[v] = iw;\n//\t}\n//\truntime.stop();\n//\n//\tINFO(\"[DONE] (\" , runtime.elapsed().count() , \" ms)\");\n//\n//\t// test correctness of result\n//\tbool correct = true;\n//\tG.forNodes([&](node v){\n//\t\tcorrect &= (weightedDegree[v] == (n - 1));\n//\t});\n//\n//\tEXPECT_TRUE(correct);\n//\n//}\n\n//\n//TEST_F(GraphBenchmark, weightedDegree_raw_par) {\n//\tint64_t n = this->n;\n//\tGraphGenerator graphGen;\n//\tGraph G = graphGen.makeCompleteGraph(n);\n//\tstinger* S = G.asSTINGER();\n//\n//\tAux::Timer runtime;\n//\n//\truntime.start();\n//\tNodeMap<double> weightedDegree(n, 0.0);\n//\n//\t#pragma omp parallel for\n//\tfor (node v = 1; v <= n; ++v) {\n//\t\tdouble iw = 0.0;\n//\t\tSTINGER_READ_ONLY_FORALL_EDGES_OF_VTX_BEGIN(S, v) {\n//\t\t\tiw += stinger_edgeweight(S, STINGER_EDGE_SOURCE, STINGER_EDGE_DEST, G.defaultEdgeType);\n//\t\t} STINGER_READ_ONLY_FORALL_EDGES_OF_VTX_END();\n//\t\tweightedDegree[v] = 
iw;\n//\t}\n//\truntime.stop();\n//\n//\tINFO(\"[DONE] (\" , runtime.elapsed().count() , \" ms)\");\n//\n//\t// test correctness of result\n//\tbool correct = true;\n//\tG.forNodes([&](node v){\n//\t\tcorrect &= (weightedDegree[v] == (n - 1));\n//\t});\n//\n//\tEXPECT_TRUE(correct);\n//\n//}\n\n\n} /* namespace NetworKit */\n\n#endif /*NOGTEST */\n" }, { "alpha_fraction": 0.6615146994590759, "alphanum_fraction": 0.6738794445991516, "avg_line_length": 15.589743614196777, "blob_id": "786df3e164d20fb1f7c15a67ec33a027a99a8040", "content_id": "41c88ee2b8a764956521466c09e262f9b8f34026", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 647, "license_type": "permissive", "max_line_length": 54, "num_lines": 39, "path": "/networkit/cpp/generators/quadtree/test/QuadTreeGTest.h", "repo_name": "networkitproject/networkit-mirror", "src_encoding": "UTF-8", "text": "/*\n * QuadTreeGTest.h\n *\n * Created on: 28.05.2014\n * Author: Moritz v. Looz ([email protected])\n */\n\n#ifndef QUADTREETEST_H_\n#define QUADTREETEST_H_\n\n#include <gtest/gtest.h>\n#include <cmath>\n#include <vector>\n\n#include \"../Quadtree.h\"\n\nusing std::vector;\n\nnamespace NetworKit {\n\nclass QuadTreeGTest: public testing::Test {\npublic:\n\tQuadTreeGTest() = default;\n\tvirtual ~QuadTreeGTest() = default;\n\nprotected:\n\ttemplate <class T>\n\tQuadNode<T> getRoot(Quadtree<T> &tree) {\n\t\treturn tree.root;\n\t}\n\n\ttemplate <class T>\n\tvector<QuadNode<T> > getChildren(QuadNode<T> &node) {\n\t\treturn node.children;\n\t}\n};\n\n} /* namespace NetworKit */\n#endif /* QUADTREETEST_H_ */\n" }, { "alpha_fraction": 0.6078431606292725, "alphanum_fraction": 0.6339869499206543, "avg_line_length": 11.75, "blob_id": "5a99b25448b08ec80450ac712b620108bcc1da8a", "content_id": "7e9c366aef854f11cd281d625f0bc739142c8dc2", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 306, "license_type": "permissive", "max_line_length": 39, "num_lines": 24, "path": "/networkit/cpp/graph/test/APSPGTest.h", "repo_name": "networkitproject/networkit-mirror", "src_encoding": "UTF-8", "text": "/*\n * APSPGTest.h\n *\n * Created on: 07.07.2015\n * Author: Arie Slobbe\n */\n\n#ifndef NOGTEST\n\n#ifndef APSPGTEST_H_\n#define APSPGTEST_H_\n\n#include <gtest/gtest.h>\n\nnamespace NetworKit {\n\nclass APSPGTest: public testing::Test {\n};\n\n} /* namespace NetworKit */\n\n#endif /* APSPGTEST_H_ */\n\n#endif /* NOGTEST */\n" }, { "alpha_fraction": 0.6538461446762085, "alphanum_fraction": 0.6719456911087036, "avg_line_length": 14.785714149475098, "blob_id": "8e8875dd0f01ac1ee21fa3ecc6b6ba0635a77866", "content_id": "06a249d8108cd9d88b2522684f98f5bf4cc5ec2d", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 442, "license_type": "permissive", "max_line_length": 49, "num_lines": 28, "path": "/networkit/cpp/independentset/test/IndependentSetGTest.h", "repo_name": "networkitproject/networkit-mirror", "src_encoding": "UTF-8", "text": "/*\n * IndependentSetTest.h\n *\n * Created on: 27.02.2013\n * Author: Christian Staudt ([email protected])\n */\n#ifndef NOGTEST\n\n#ifndef INDEPENDENTSETGTEST_H_\n#define INDEPENDENTSETGTEST_H_\n\n\n#include <gtest/gtest.h>\n\n#include \"../../graph/Graph.h\"\n#include \"../../independentset/Luby.h\"\n\nnamespace NetworKit {\n\nclass IndependentSetGTest: public testing::Test {\npublic:\n\n};\n\n} /* namespace NetworKit */\n#endif /* INDEPENDENTSETTEST_H_ 
*/\n\n#endif /*NOGTEST */\n" }, { "alpha_fraction": 0.7523077130317688, "alphanum_fraction": 0.7615384459495544, "avg_line_length": 21.413793563842773, "blob_id": "6401478a8c879cea1dcf34db4e8774d58a67a710", "content_id": "5e9621cda043102471d7d81646c7a20cb616cc7d", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 650, "license_type": "permissive", "max_line_length": 73, "num_lines": 29, "path": "/networkit/cpp/centrality/test/SpanningEdgeCentralityGTest.h", "repo_name": "networkitproject/networkit-mirror", "src_encoding": "UTF-8", "text": "/*\n * SpanningEdgeCentralityGTest.h\n *\n * Created on: Jan 17, 2016\n * Author: Michael\n */\n\n#ifndef NETWORKIT_CPP_CENTRALITY_TEST_SPANNINGEDGECENTRALITYGTEST_H_\n#define NETWORKIT_CPP_CENTRALITY_TEST_SPANNINGEDGECENTRALITYGTEST_H_\n\n#include \"gtest/gtest.h\"\n#include \"../SpanningEdgeCentrality.h\"\n\n#include <vector>\n#include <string>\n\nnamespace NetworKit {\n\nusing namespace std;\n\nclass SpanningEdgeCentralityGTest : public testing::Test {\npublic:\n\tSpanningEdgeCentralityGTest() = default;\n\tvirtual ~SpanningEdgeCentralityGTest() = default;\n};\n\n} /* namespace NetworKit */\n\n#endif /* NETWORKIT_CPP_CENTRALITY_TEST_SPANNINGEDGECENTRALITYGTEST_H_ */\n" }, { "alpha_fraction": 0.5838218331336975, "alphanum_fraction": 0.5932004451751709, "avg_line_length": 21.447368621826172, "blob_id": "7c829f2bfd6b5f89fa9ebb87b5c5e9a7dd5c9a09", "content_id": "0570f13fa1daf22ee081135dd24bcf8db64f478e", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 853, "license_type": "permissive", "max_line_length": 110, "num_lines": 38, "path": "/networkit/cpp/io/EdgeListWriter.cpp", "repo_name": "networkitproject/networkit-mirror", "src_encoding": "UTF-8", "text": "/*\n * EdgeListWriter.cpp\n *\n * Created on: 18.06.2013\n * Author: cls\n */\n\n#include \"EdgeListWriter.h\"\n#include \"../auxiliary/Log.h\"\n\n#include <sstream>\n\n#include \"../auxiliary/Enforce.h\"\n\nnamespace NetworKit {\n\nEdgeListWriter::EdgeListWriter(char separator, node firstNode) : separator(separator), firstNode(firstNode) {}\n\nvoid EdgeListWriter::write(const Graph& G, std::string path) {\n std::ofstream file(path);\n Aux::enforceOpened(file);\n\n if (G.isWeighted()) {\n G.forEdges([&](node u, node v, double weight){\n file << (u + firstNode) << separator << (v + firstNode) << separator << weight << std::endl;\n });\n } else {\n G.forEdges([&](node u, node v){\n \tfile << (u + firstNode) << separator << (v + firstNode) << std::endl;\n });\n }\n\n\n file.close();\n\n}\n\n} /* namespace NetworKit */\n" }, { "alpha_fraction": 0.6407766938209534, "alphanum_fraction": 0.6569579243659973, "avg_line_length": 18.935483932495117, "blob_id": "061706c785d72537031aa4b61a755d80ec71bcde", "content_id": "2d7dafec683a4857ec40929fc6c5835e19512446", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 618, "license_type": "permissive", "max_line_length": 67, "num_lines": 31, "path": "/networkit/cpp/graph/APSP.cpp", "repo_name": "networkitproject/networkit-mirror", "src_encoding": "UTF-8", "text": "/*\n * APSP.cpp\n *\n * Created on: 07.07.2015\n * Author: Arie Slobbe\n */\n\n#include \"APSP.h\"\n#include \"../auxiliary/Log.h\"\n#include \"Dijkstra.h\"\n\nnamespace NetworKit {\n\nAPSP::APSP(const Graph& G) : Algorithm(), G(G) {}\n\nvoid APSP::run() {\n\tstd::vector<edgeweight> distanceVector(G.upperNodeIdBound(), 
0.0);\n\tdistances.resize(G.upperNodeIdBound(), distanceVector);\n\tG.parallelForNodes([&](node u){\n\t\tDijkstra dijk(G, u);\n\t\tdijk.run();\n\t\tdistances[u] = dijk.getDistances();\n\t});\n\thasRun = true;\n}\n\nstd::string NetworKit::APSP::toString() const {\n\treturn \"All-Pairs Shortest Path Algorithm\";\n}\n\n} /* namespace NetworKit */\n" }, { "alpha_fraction": 0.6133509874343872, "alphanum_fraction": 0.6384665966033936, "avg_line_length": 21.92424201965332, "blob_id": "dbc2b08ee3f4ae3781f938220ebf2db88793eebf", "content_id": "9f9b45b7c5be4b9d14356dbae9a1aad65913fce6", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 3026, "license_type": "permissive", "max_line_length": 77, "num_lines": 132, "path": "/networkit/cpp/community/test/CommunityDetectionBenchmark.cpp", "repo_name": "networkitproject/networkit-mirror", "src_encoding": "UTF-8", "text": "/*\n * CommunityDetectionBenchmark.h\n *\n * Created on: 16.05.2014\n * Author: Klara Reichard ([email protected]), Marvin Ritter ([email protected])\n */\n\n#ifndef NOGTEST\n\n#include <map>\n#include <functional>\n\n#include \"CommunityDetectionBenchmark.h\"\n#include \"../PLP.h\"\n#include \"../PLM.h\"\n#include \"../Modularity.h\"\n#include \"../../centrality/Betweenness.h\"\n#include \"../../centrality/PageRank.h\"\n#include \"../../auxiliary/Timer.h\"\n#include \"../../structures/Partition.h\"\n\nnamespace NetworKit {\n\nconstexpr int runs = 20;\n\nvoid CommunityDetectionBenchmark::SetUp() {\n\n}\n\nTEST_F(CommunityDetectionBenchmark, timeClusteringAlgos) {\n\tAux::Timer timer;\n\tModularity mod;\n\n\t// std::string graph = \"../graphs/in-2004.graph\";\n\t// std::string graph = \"../graphs/uk-2002.graph\";\n\tstd::string graph = \"../graphs/uk-2007-05.graph\";\n\n\tprintf(\"Reading graph file %s ...\\n\", graph.c_str());\n\ttimer.start();\n\tconst Graph G = this->metisReader.read(graph);\n\ttimer.stop();\n\tprintf(\"Reading graph took %.1f s\\n\", timer.elapsedMilliseconds() / 1000.0);\n\n\tfor (int r = 0; r < runs; r++) {\n\t\tGraph Gcopy = G;\n\t\tPLP algo(Gcopy);\n\n\t\ttimer.start();\n\t\talgo.run();\n\t\tPartition zeta = algo.getPartition();\n\t\ttimer.stop();\n\n\t\tauto communitySizes = zeta.subsetSizes();\n\n\n\t\tprintf(\"%s on %s: %.1f s\\n\\t# communities: %lu\\n\\tmodularity: %f\\n\",\n\t\t\t\"Parallel Label Propagation\", graph.c_str(),\n\t\t\ttimer.elapsedMilliseconds() / 1000.0,\n\t\t\tzeta.numberOfSubsets(),\n\t\t\tmod.getQuality(zeta, G));\n\t}\n\n\tfor (int r = 0; r < runs; r++) {\n\t\tGraph Gcopy = G;\n\t\tPLM algo(Gcopy);\n\n\t\ttimer.start();\n\t\talgo.run();\n\t\tPartition zeta = algo.getPartition();\n\t\ttimer.stop();\n\n\t\tauto communitySizes = zeta.subsetSizes();\n\n\n\t\tprintf(\"%s on %s: %.1f s\\n\\t# communities: %lu\\n\\tmodularity: %f\\n\",\n\t\t\t\"Parallel Louvain\", graph.c_str(),\n\t\t\ttimer.elapsedMilliseconds() / 1000.0,\n\t\t\tzeta.numberOfSubsets(),\n\t\t\tmod.getQuality(zeta, G));\n\t}\n}\n\nTEST_F(CommunityDetectionBenchmark, timePageRankCentrality) {\n\tAux::Timer timer;\n\n\tstd::string graph = \"../graphs/uk-2002.graph\";\n\tconst Graph G = this->metisReader.read(graph);\n\n\tfor (int r = 0; r < runs; r++) {\n\t\tPageRank cen(G, 1e-6);\n\n\t\ttimer.start();\n\t\tcen.run();\n\t\ttimer.stop();\n\t\tauto ranking = cen.ranking();\n\n\n\t\tprintf(\"%s on %s: %.1f s\\n\\tranking: [(%lu: %f), (%lu: %f), ...]\\n\",\n\t\t\t\"Page Rank Centrality\", graph.c_str(),\n\t\t\ttimer.elapsedMilliseconds() / 1000.0,\n\t\t\tranking[0].first, 
ranking[0].second,\n\t\t\tranking[1].first, ranking[1].second);\n\t}\n}\n\nTEST_F(CommunityDetectionBenchmark, timeBetweennessCentrality) {\n\tAux::Timer timer;\n\n\tstd::string graph = \"../graphs/cond-mat-2005.graph\";\n\tconst Graph G = this->metisReader.read(graph);\n\n\tfor (int r = 0; r < runs; r++) {\n\t\tBetweenness cen(G);\n\n\t\ttimer.start();\n\t\tcen.run();\n\t\ttimer.stop();\n\t\tauto ranking = cen.ranking();\n\n\n\t\tprintf(\"%s on %s: %.1f s\\n\\tranking: [(%lu: %f), (%lu: %f), ...]\\n\",\n\t\t\t\"Betweenness Centrality\", graph.c_str(),\n\t\t\ttimer.elapsedMilliseconds() / 1000.0,\n\t\t\tranking[0].first, ranking[0].second,\n\t\t\tranking[1].first, ranking[1].second);\n\t}\n}\n\n\n} /* namespace NetworKit */\n\n#endif /*NOGTEST */\n" }, { "alpha_fraction": 0.5857605338096619, "alphanum_fraction": 0.6116504669189453, "avg_line_length": 10.44444465637207, "blob_id": "e0608780c12809347f5187c097efa670994da06f", "content_id": "7ec4cf76de886744a0896b3e0c51c541f48ba1a0", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 309, "license_type": "permissive", "max_line_length": 37, "num_lines": 27, "path": "/networkit/cpp/io/test/IOGTest.h", "repo_name": "networkitproject/networkit-mirror", "src_encoding": "UTF-8", "text": "/*\n * IOGTest.h\n *\n * Created on: 12.12.2012\n * Author: Christian Staudt ([email protected])\n */\n\n#ifndef NOGTEST\n\n#ifndef IOGTEST_H_\n#define IOGTEST_H_\n\n#include <gtest/gtest.h>\n\nnamespace NetworKit {\n\nclass IOGTest: public testing::Test {\n\n};\n\n\n\n} /* namespace NetworKit */\n#endif /* IOGTEST_H_ */\n\n\n#endif /* NOGTEST */\n" }, { "alpha_fraction": 0.6835066676139832, "alphanum_fraction": 0.6953937411308289, "avg_line_length": 17.69444465637207, "blob_id": "3fd48e398f6994fb6cf3dbd9de26041c7410c5f8", "content_id": "91170cb0d8934120abf7bfb79d955d6f6b56e0a6", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 673, "license_type": "permissive", "max_line_length": 78, "num_lines": 36, "path": "/networkit/cpp/numerics/LAMG/Level/LevelAggregation.h", "repo_name": "networkitproject/networkit-mirror", "src_encoding": "UTF-8", "text": "/*\n * LevelAggregation.h\n *\n * Created on: 10.01.2015\n * Author: Michael\n */\n\n#ifndef LEVELAGGREGATION_H_\n#define LEVELAGGREGATION_H_\n\n#include \"Level.h\"\n\nnamespace NetworKit {\n\n/**\n * @ingroup numerics\n */\nclass LevelAggregation : public Level {\nprivate:\n\tCSRMatrix P; // interpolation matrix (n x nc)\n\tCSRMatrix R; // restriction matrix (nc x n)\n\npublic:\n\tLevelAggregation(const CSRMatrix &A, const CSRMatrix &P, const CSRMatrix &R);\n\n\tvoid coarseType(const Vector &xf, Vector &xc) const;\n\n\tvoid restrict(const Vector &bf, Vector &bc) const;\n\n\tvoid interpolate(const Vector &xc, Vector &xf) const;\n\n};\n\n} /* namespace NetworKit */\n\n#endif /* LEVELAGGREGATION_H_ */\n" }, { "alpha_fraction": 0.6971428394317627, "alphanum_fraction": 0.7047619223594666, "avg_line_length": 17.75, "blob_id": "05253e0040cbaefd6f331d5abd51e0af8c6a619a", "content_id": "1f7ad56006b2cbec2edd7069a9380711bfc00a48", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1050, "license_type": "permissive", "max_line_length": 74, "num_lines": 56, "path": "/networkit/cpp/components/StronglyConnectedComponents.h", "repo_name": "networkitproject/networkit-mirror", "src_encoding": "UTF-8", "text": "/*\n * StronglyConnectedComponents.h\n *\n * Created 
on: 01.06.2014\n * Author: Klara Reichard ([email protected]), Marvin Ritter ([email protected])\n */\n\n#ifndef STRONGLYCONNECTEDCOMPONENTS_H_\n#define STRONGLYCONNECTEDCOMPONENTS_H_\n\n#include \"../graph/Graph.h\"\n#include \"../structures/Partition.h\"\n\nnamespace NetworKit {\n\n/**\n * @ingroup components\n * Determines the strongly connected components of a directed graph.\n */\nclass StronglyConnectedComponents {\npublic:\n\tStronglyConnectedComponents(const Graph& G);\n\n\t/**\n\t * This method determines the strongly connected components of the graph G.\n\t */\n\tvoid run();\n\n\t/**\n\t * This method returns the number of strongly connected components.\n\t */\n\tcount numberOfComponents();\n\n\t/**\n\t * This method returns the component in which node @a u is situated.\n\t *\n\t * @param[in]\tu\tthe node whose component is asked for\n\t */\n\tcount componentOfNode(node u);\n\n\n\t/**\n\t * Return a Partition that represents the components\n\t */\n\tPartition getPartition();\n\n\nprivate:\n\tconst Graph& G;\n\tPartition component;\n};\n\n}\n\n\n#endif /* STRONGLYCONNECTEDCOMPONENTS_H_ */\n" }, { "alpha_fraction": 0.6854974627494812, "alphanum_fraction": 0.6939291954040527, "avg_line_length": 23.204082489013672, "blob_id": "936f6bd96278e089275a80a36241369017963518", "content_id": "e83eadeaf45877658c78c0af4f1adb5816c485d4", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1186, "license_type": "permissive", "max_line_length": 115, "num_lines": 49, "path": "/networkit/cpp/centrality/Closeness.h", "repo_name": "networkitproject/networkit-mirror", "src_encoding": "UTF-8", "text": "/*\n * Closeness.h\n *\n * Created on: 03.10.2014\n * Author: nemes\n */\n\n#ifndef CLOSENESS_H_\n#define CLOSENESS_H_\n\n#include \"Centrality.h\"\n\nnamespace NetworKit {\n\n/**\n * @ingroup centrality\n */\nclass Closeness: public NetworKit::Centrality {\npublic:\n\t/**\n\t * Constructs the Closeness class for the given Graph @a G. If the closeness scores should be normalized,\n\t * then set @a normalized to <code>true</code>. The run() method takes O(nm) time, where n is the number\n\t * of nodes and m is the number of edges of the graph. 
\n\t *\n\t * @param G The graph.\n\t * @param normalized Set this parameter to <code>true</code> if scores should be normalized in the interval [0,1].\n\t * @param\tcheckConnectedness\tturn this off if you know the graph is connected\n\t *\n\t * TODO: extend definition of closeness to disconnected graphs\n\t */\n\tCloseness(const Graph& G, bool normalized=false, bool checkConnectedness=true);\n\n\n\n\t/**\n\t* Compute closeness scores in parallel\n\t*\n\t*/\n\tvoid run() override;\n\n\t/*\n\t * Returns the maximum possible Closeness a node can have in a graph with the same number of nodes (=a star)\n\t */\n\tdouble maximum() override;\n};\n\n} /* namespace NetworKit */\n\n#endif /* CLOSENESS_H_ */\n" }, { "alpha_fraction": 0.6439909338951111, "alphanum_fraction": 0.6621315479278564, "avg_line_length": 12.78125, "blob_id": "1948b423d5bb0f46606332aafd6c13d2aba4e67a", "content_id": "14c7119147ba4d3ed1a1a448474451ebe72923a9", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 441, "license_type": "permissive", "max_line_length": 48, "num_lines": 32, "path": "/networkit/cpp/overlap/test/OverlapGTest.h", "repo_name": "networkitproject/networkit-mirror", "src_encoding": "UTF-8", "text": "/*\n * OverlapGTest.h\n *\n * Created on: 21.12.2012\n * Author: Christian Staudt ([email protected])\n */\n\n#ifndef NOGTEST\n\n#ifndef OVERLAPGTEST_H_\n#define OVERLAPGTEST_H_\n\n#include <gtest/gtest.h>\n#include <functional>\n\n#include \"../HashingOverlapper.h\"\n#include \"../../community/ClusteringGenerator.h\"\n\n\nnamespace NetworKit {\n\nclass OverlapGTest: public testing::Test {\n\n};\n\n\n\n\n} /* namespace NetworKit */\n#endif /* OVERLAPGTEST_H_ */\n\n#endif /* NOGTEST */\n" }, { "alpha_fraction": 0.6149400472640991, "alphanum_fraction": 0.6213613152503967, "avg_line_length": 29.535947799682617, "blob_id": "7247abe7155e8227cc1d156c46a878615de56d5e", "content_id": "35b7be5a6c4b82947a3cb1ff981f66058e0cdd28", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 4674, "license_type": "permissive", "max_line_length": 144, "num_lines": 153, "path": "/networkit/cpp/Unittests-X.cpp", "repo_name": "networkitproject/networkit-mirror", "src_encoding": "UTF-8", "text": "//============================================================================\n// Name : Unittests-X.cpp\n// Author : Christian Staudt ([email protected]),\n//\t\t Henning Meyerhenke ([email protected])\n// Version :\n// Copyright : © 2012, Christian Staudt, Henning Meyerhenke\n// Description : Calling unit tests and benchmarks\n//============================================================================\n\n// includes\n#include <iostream>\n#include <utility>\n//#include <cfenv>\t// floating point exceptions\n#include <stdexcept>\n\n// GoogleTest\n#ifndef NOGTEST\n#include <gtest/gtest.h>\n#endif\n\n// OpenMP\n#include <omp.h>\n\n// necessary for some reasons?\n#include \"Globals.h\"\n#include \"ext/optionparser.h\"\n#include \"auxiliary/Log.h\"\n#include \"graph/Graph.h\"\n#include \"auxiliary/Parallelism.h\"\n\n\nusing namespace NetworKit;\n\n\n// *** Option Parser Configuration ***//\n\nclass Arg: public OptionParser::Arg {\n\nstatic OptionParser::ArgStatus Required(const OptionParser::Option& option, bool msg)\n{\n    if (option.arg != 0)\n    return OptionParser::ARG_OK;\n\n    if (msg) {\n\t    std::cout << \"Option '\" << option << \"' requires an argument\" << std::endl;\n    }\n    return OptionParser::ARG_ILLEGAL;\n}\n\n};\n\n// TODO: clean 
up obsolete parameters\nenum optionIndex { UNKNOWN, HELP, LOGLEVEL, THREADS, TESTS, TRIALS, BENCHMARKS, FILTER };\nconst OptionParser::Descriptor usage[] =\n{\n {UNKNOWN, 0,\"\" , \"\" ,OptionParser::Arg::None, \"\"\n \"Options:\" },\n {HELP, 0,\"h\" , \"help\",OptionParser::Arg::None, \" --help \\t Print usage and exit.\" },\n {LOGLEVEL, 0, \"\" , \"loglevel\", OptionParser::Arg::Required, \" --loglevel=<LEVEL> \\t set the log level\" },\n {THREADS, 0, \"\" , \"threads\", OptionParser::Arg::Required, \" --threads=<NUM> \\t set the maximum number of threads\" },\n {TESTS, 0, \"t\", \"tests\", OptionParser::Arg::None, \" --tests \\t Run unit tests\"},\n {TRIALS, 0, \"e\", \"trials\", OptionParser::Arg::None, \" --trials \\t Run experimental tests\"},\n {BENCHMARKS, 0, \"b\", \"benchmarks\", OptionParser::Arg::None, \" --benchmarks \\t Run benchmarks\"},\n {FILTER, 0, \"f\", \"gtest_filter\", OptionParser::Arg::Required, \" --gtest_filter=<FILTER_PATTERN> \\t Run tests that match the filter pattern\" },\n {UNKNOWN, 0,\"\" , \"\" ,OptionParser::Arg::None, \"\\nExamples:\\n\"\n \" TODO\" },\n {0,0,0,0,0,0}\n};\n\n\nint main(int argc, char **argv) {\n\tstd::cout << \"*** NetworKit Unit Tests *** \" << std::endl;\n\n\t// ENABLE FLOATING POINT EXCEPTIONS (needs GNU extension, apparently only available on Linux)\n#ifdef _GNU_SOURCE\n\t// feenableexcept(FE_ALL_EXCEPT);\n#endif\n\n\tstd::string program_name = argv[0];\n\t// PARSE OPTIONS\n\targc-=(argc>0); argv+=(argc>0); // skip program name argv[0] if present\n\n\tOptionParser::Stats stats(usage, argc, argv);\n\tstd::vector<OptionParser::Option> options(stats.options_max), buffer(stats.buffer_max);\n\tOptionParser::Parser parse(usage, argc, argv, options.data(), buffer.data());\n\n\tif (parse.error())\n\t return 1;\n\n\tif (options[HELP]) {\n\t OptionParser::printUsage(std::cout, usage);\n\t return 0;\n\t}\n\n\tfor (OptionParser::Option* opt = options[UNKNOWN]; opt; opt = opt->next())\n\t std::cout << \"Unknown option: \" << opt->name << \"\\n\";\n\n\tfor (int i = 0; i < parse.nonOptionsCount(); ++i)\n\t std::cout << \"Non-option #\" << i << \": \" << parse.nonOption(i) << \"\\n\";\n\n\n\n\n\t// CONFIGURE LOGGING\n\n\n#ifndef NOLOGGING\n\tif (options[LOGLEVEL]) {\n\t\tAux::Log::setLogLevel(options[LOGLEVEL].arg);\n\t\tAux::Log::Settings::setPrintLocation(true);\n\t} else {\n\t\tAux::Log::setLogLevel(\"ERROR\");\t// with default level\n\t\tAux::Log::Settings::setPrintLocation(true);\n\t}\n#endif\n\n\n\t// CONFIGURE PARALLELISM\n\n#ifdef _OPENMP\n\tomp_set_nested(1); // enable nested parallelism\n#endif\n\n\tif (options[THREADS]) {\n\t\t// set number of threads\n\t\tint nThreads = std::atoi(options[THREADS].arg);\n\t\tAux::setNumberOfThreads(nThreads);\n\t}\n\t\n\t// get program name (currently only for unix)\n\tauto pos = program_name.find_last_of(\"/\");\n\tprogram_name = program_name.substr(pos+1,program_name.length()-1);\n\n#ifndef NOGTEST\n\tif (options[TESTS]) {\n\t\t::testing::GTEST_FLAG(filter) = \"*Test.test*\";\n\t} else if (options[TRIALS]) {\n\t\t::testing::GTEST_FLAG(filter) = \"*Test.try*\";\n\t} else if (options[BENCHMARKS]) {\n\t\tif (program_name != \"NetworKit-Tests-O\") {\n\t\t\tstd::cout << \"Hint: Performance tests should be run in optimized mode\" << std::endl;\n\t\t}\n\t\t::testing::GTEST_FLAG(filter) = \"*Benchmark*\";\n\t} else if (options[FILTER]) {\n\t\t::testing::GTEST_FLAG(filter) = options[FILTER].arg;\n\t}\n\t::testing::InitGoogleTest(&argc, argv);\n\tINFO(\"=== starting unit tests ===\");\n\treturn 
RUN_ALL_TESTS();\n#else\n\t throw std::runtime_error(\"unit tests are excluded from build by the NOGTEST preprocessor directive\");\n#endif\n}\n" }, { "alpha_fraction": 0.6019563674926758, "alphanum_fraction": 0.6079759001731873, "avg_line_length": 19.44615364074707, "blob_id": "a66be7060483fe8a3e47f07196b6410301623fd7", "content_id": "c1a2b7a82c3ea0a5e2b1f66cea43770aa0f4973e", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1329, "license_type": "permissive", "max_line_length": 71, "num_lines": 65, "path": "/networkit/cpp/dynamics/GraphUpdater.cpp", "repo_name": "networkitproject/networkit-mirror", "src_encoding": "UTF-8", "text": "/*\n * GraphUpdater.cpp\n *\n * Created on: 27.12.2013\n * Author: cls\n */\n\n#include \"GraphUpdater.h\"\n#include \"../auxiliary/Log.h\"\n\nnamespace NetworKit {\n\nGraphUpdater::GraphUpdater(Graph& G) : G(G) {\n}\n\nvoid GraphUpdater::update(std::vector<GraphEvent>& stream) {\n\tfor (GraphEvent ev : stream) {\n\t\tTRACE(\"event: \" , ev.toString());\n\t\tswitch (ev.type) {\n\t\t\tcase GraphEvent::NODE_ADDITION : {\n\t\t\t\tG.addNode();\n\t\t\t\tbreak;\n\t\t\t}\n\t\t\tcase GraphEvent::NODE_REMOVAL : {\n\t\t\t\tG.removeNode(ev.u);\n\t\t\t\tbreak;\n\t\t\t}\n\t\t\tcase GraphEvent::NODE_RESTORATION :{\n\t\t\t\tG.restoreNode(ev.u);\n\t\t\t\tbreak;\n\t\t\t}\n\t\t\tcase GraphEvent::EDGE_ADDITION : {\n\t\t\t\tG.addEdge(ev.u, ev.v, ev.w);\n\t\t\t\tbreak;\n\t\t\t}\n\t\t\tcase GraphEvent::EDGE_REMOVAL : {\n\t\t\t\tG.removeEdge(ev.u, ev.v);\n\t\t\t\tbreak;\n\t\t\t}\n\t\t\tcase GraphEvent::EDGE_WEIGHT_UPDATE : {\n\t\t\t\tG.setWeight(ev.u, ev.v, ev.w);\n\t\t\t\tbreak;\n\t\t\t}\n\t\t\tcase GraphEvent::EDGE_WEIGHT_INCREMENT : {\n\t\t\t\tG.setWeight(ev.u, ev.v, G.weight(ev.u, ev.v) + ev.w);\n\t\t\t\tbreak;\n\t\t\t}\n\t\t\tcase GraphEvent::TIME_STEP : {\n\t\t\t\tG.timeStep();\n\t\t\t\tbreak;\n\t\t\t}\n\t\t\tdefault: {\n\t\t\t\tthrow std::runtime_error(\"unknown event type\");\n\t\t\t}\n\t\t}\n\t}\n\t// record graph size\n\tsize.push_back(std::make_pair(G.numberOfNodes(), G.numberOfEdges()));\n}\n\nstd::vector<std::pair<count, count> > GraphUpdater::getSizeTimeline() {\n\treturn size;\n}\n\n} /* namespace NetworKit */\n" }, { "alpha_fraction": 0.6411150097846985, "alphanum_fraction": 0.6689895391464233, "avg_line_length": 14.105262756347656, "blob_id": "f908614f116da637bf15834b5536d9e187bae22e", "content_id": "159d5919510a439eb7e820b7618a5db51714e44f", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 287, "license_type": "permissive", "max_line_length": 43, "num_lines": 19, "path": "/networkit/cpp/graph/test/SpanningGTest.h", "repo_name": "networkitproject/networkit-mirror", "src_encoding": "UTF-8", "text": "/*\n * SpanningGTest.h\n *\n * Created on: 03.09.2015\n * Author: Henning\n */\n\n#ifndef SPANNINGGTEST_H_\n#define SPANNINGGTEST_H_\n\n#include <gtest/gtest.h>\n\nnamespace NetworKit {\n\nclass SpanningGTest: public testing::Test {\n};\n\n} /* namespace NetworKit */\n#endif /* SPANNINGGTEST_H_ */\n" }, { "alpha_fraction": 0.6207970380783081, "alphanum_fraction": 0.6338729858398438, "avg_line_length": 21.30555534362793, "blob_id": "d3593761223fcc3ea2925b2b7ec9a143903ab1b7", "content_id": "3991aaa9a10fc45c0cdc7f0e82e8ea88af6a424f", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1606, "license_type": "permissive", "max_line_length": 90, "num_lines": 72, "path": 
"/networkit/cpp/centrality/KatzCentrality.cpp", "repo_name": "networkitproject/networkit-mirror", "src_encoding": "UTF-8", "text": "/*\n * KatzCentrality.cpp\n *\n * Created on: 09.01.2015\n * Author: Henning\n */\n\n#include \"KatzCentrality.h\"\n#include \"../auxiliary/NumericTools.h\"\n\nnamespace NetworKit {\n\nKatzCentrality::KatzCentrality(const Graph& G, double alpha, double beta, double tol):\n\t\tCentrality(G, true), alpha(alpha), beta(beta), tol(tol)\n{\n\n}\n\nvoid KatzCentrality::run() {\n\tcount z = G.upperNodeIdBound();\n\tstd::vector<double> values(z, 1.0);\n\tscoreData = values;\n\tdouble length = 0.0;\n\tdouble oldLength = 0.0;\n\n\tauto converged([&](double val, double other) {\n\t\t// compute residual\n\t\treturn (Aux::NumericTools::equal(val, other, tol));\n\t});\n\n\tdo {\n\t\toldLength = length;\n\n\t\t// iterate matrix-vector product\n\t\tG.parallelForNodes([&](node u) {\n\t\t\tvalues[u] = 0.0;\n\t\t\t// note: inconsistency in definition in Newman's book (Ch. 7) regarding directed graphs\n\t\t\t// we follow the verbal description, which requires to sum over the incoming edges\n\t\t\tG.forInEdgesOf(u, [&](node v, edgeweight ew) {\n\t\t\t\tvalues[u] += ew * scoreData[v];\n\t\t\t});\n\t\t\tvalues[u] *= alpha;\n\t\t\tvalues[u] += beta;\n\t\t});\n\n\t\t// normalize values\n\t\tlength = 0.0;\n\t\tlength = G.parallelSumForNodes([&](node u) {\n\t\t\treturn (values[u] * values[u]);\n\t\t});\n\t\tlength = sqrt(length);\n\t\tG.parallelForNodes([&](node u) {\n\t\t\tvalues[u] /= length;\n\t\t});\n\n//\t\tTRACE(\"length: \", length);\n//\t\tTRACE(values);\n\n\t\tscoreData = values;\n\t} while (! converged(length, oldLength));\n\n\thasRun = true;\n\n//\t// check sign and correct if necessary\n//\tif (scoreData[0] < 0) {\n//\t\tG.parallelForNodes([&](node u) {\n//\t\t\tscoreData[u] = fabs(scoreData[u]);\n//\t\t});\n//\t}\n}\n\n} /* namespace NetworKit */\n" }, { "alpha_fraction": 0.767128586769104, "alphanum_fraction": 0.7755966186523438, "avg_line_length": 73.25714111328125, "blob_id": "5db93a49639a6a20da29fe7d32d1fb4be1b2d7d0", "content_id": "147d3de11cc198842423e422d470b7b30eb1a416", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2598, "license_type": "permissive", "max_line_length": 227, "num_lines": 35, "path": "/Doc/student_exercises_and_projects.mdown", "repo_name": "networkitproject/networkit-mirror", "src_encoding": "UTF-8", "text": "### Student Exercises\n\nNetworKit is currently also used as a teaching tool. This section describes the workflow for student teams. Suppose the course is named, \"Networks 101\", then there will be a dedicated branch `Networks101` for student exercises.\n\n1. Fork the main repository via [algohub.iti.kit.edu](http://algohub.iti.kit.edu) and name the fork according to your team. (On the repository page, click `Options -> Fork`)\n2. Make sure that the correct access rights for your team are set. (On the repository page: `Options -> Settings`)\n3. Switch to the appropriate branch for the course (e.g. `hg up Networks101`) and ONLY work on this branch.\n4. Work with the forked repository as you please. Coordinate with your team.\n5. On completion of the exercise, send a pull request from your fork to the main repository. (On the repository page, click `Options -> Create Pull Request`)\n6. The pull request is now under review. 
Watch for and react to comments from the reviewer.\n\nWe also ask student teams to adhere to the following conventions:\n\n- With multiple teams working on the same exercise, append your team name to the class and file names as well as the names of unit tests to avoid naming clashes.\n- If you plan to make modifications to existing parts of NetworKit, discuss them with the core developers first, e.g. by posting to the [mailing list][list].\n- Delete forked repositories when they are no longer needed.\n\n\n\n### Reviewing Student Exercises\n\nIncoming pull requests appear as notifications on [algohub.iti.kit.edu](http://algohub.iti.kit.edu). It is also possible to receive notifications via e-mail.\n\n\n1. Before the course starts, create an appropriate branch for the course (e.g. `Networks101`). Derive the branch from the `Dev` branch.\n2. Receive pull requests from student teams via [algohub.iti.kit.edu](http://algohub.iti.kit.edu).\n3. To review a pull request, switch to the course branch and pull from the forked repository of the student team. Make sure to pull the revision associated with the pull request (e.g. `hg pull -r<rev> <path/to/forked/repo>`)\n4. If everything is okay, change the status of the pull request to `Accepted` (click `Change Status` above the comment field). The comment field can be used to send feedback, creators of the request will be notified via email.\n\nGood contributions from the student exercises should be merged back into the `Dev` branch.\n\n\n### Student Projects\n\nStudents with long-term projects like Bachelor's or Master's theses should familiarize themselves with the guidelines and select a forking/branching model with their advisor." }, { "alpha_fraction": 0.9200000166893005, "alphanum_fraction": 0.9200000166893005, "avg_line_length": 100, "blob_id": "a3b410dcca89a0d43b71e94db997c22a0f006d29", "content_id": "89fc1f0c91dd911e48965cd60810cea837b7d6e2", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 100, "license_type": "permissive", "max_line_length": 100, "num_lines": 1, "path": "/networkit/components.py", "repo_name": "networkitproject/networkit-mirror", "src_encoding": "UTF-8", "text": "from _NetworKit import ConnectedComponents, ParallelConnectedComponents, StronglyConnectedComponents" }, { "alpha_fraction": 0.6756756901741028, "alphanum_fraction": 0.6918919086456299, "avg_line_length": 27.461538314819336, "blob_id": "62be6a3e0b555391aa871ba131a166255ed927bc", "content_id": "27dd0f7a11c26fb8c341400c233b590234cf68c9", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1480, "license_type": "permissive", "max_line_length": 131, "num_lines": 52, "path": "/networkit/cpp/scd/test/SelectiveCDGTest.cpp", "repo_name": "networkitproject/networkit-mirror", "src_encoding": "UTF-8", "text": "#include \"SelectiveCDGTest.h\"\n\n#include \"../PageRankNibble.h\"\n#include \"../../community/Modularity.h\"\n#include \"../../community/Conductance.h\"\n#include \"../../graph/Graph.h\"\n#include \"../../io/METISGraphReader.h\"\n#include \"../../auxiliary/Log.h\"\n\n#ifndef NOGTEST\n\nnamespace NetworKit {\n\nTEST_F(SCDGTest2, testPageRankNibble) {\n\tMETISGraphReader reader;\n\tGraph G = reader.read(\"input/hep-th.graph\");\n\t// parameters\n\tnode seed = 50;\n\tstd::set<unsigned int> seeds = {(unsigned int) seed};\n\tdouble alpha = 0.1; // loop (or teleport) probability, changed due to DGleich from: // phi * phi / (225.0 * log(100.0 * 
sqrt(m)));\n\tdouble epsilon = 1e-5; // changed due to DGleich from: pow(2, exponent) / (48.0 * B);\n\n\tPageRankNibble prn(G, alpha, epsilon);\n\tcount idBound = G.upperNodeIdBound();\n\n\t// run PageRank-Nibble and partition the graph accordingly\n\tDEBUG(\"Call PageRank-Nibble(\", seed, \")\");\n\tauto result = prn.run(seeds);\n\tauto cluster = result[seed];\n\n\t// prepare result\n\tEXPECT_GT(cluster.size(), 0u);\n\tPartition partition(idBound);\n\tpartition.allToOnePartition();\n\tpartition.toSingleton(50);\n\tindex id = partition[seed];\n\tfor (auto entry: cluster) {\n\t\tpartition.moveToSubset(id, entry);\n\t}\n\n\t// evaluate result\n\tConductance conductance;\n\tdouble targetCond = 0.4;\n\tdouble cond = conductance.getQuality(partition, G);\n\tEXPECT_LT(cond, targetCond);\n\tINFO(\"Conductance of PR-Nibble: \", cond, \"; cluster size: \", cluster.size());\n}\n\n\n} /* namespace NetworKit */\n\n#endif /*NOGTEST */\n" }, { "alpha_fraction": 0.6378233432769775, "alphanum_fraction": 0.7270294427871704, "avg_line_length": 25.690475463867188, "blob_id": "7cf904b11133e729dc7efcddb84a71c1c388635d", "content_id": "c8f04ba74f7befb8ef73f421016ca74b5f36e745", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1121, "license_type": "permissive", "max_line_length": 100, "num_lines": 42, "path": "/networkit/cpp/Globals.h", "repo_name": "networkitproject/networkit-mirror", "src_encoding": "UTF-8", "text": "/*\n * Globals.h\n *\n * Created on: 06.02.2013\n * Author: Christian Staudt ([email protected])\n */\n\n#ifndef GLOBALS_H_\n#define GLOBALS_H_\n\n#include <cstdint>\n#include <cmath>\n#include <limits>\n\n#include \"ext/ttmath/ttmath.h\"\n\n\nnamespace NetworKit {\n\t/** Typedefs **/\n\ttypedef uint64_t index; // more expressive name for an index into an array\n\ttypedef uint64_t count; // more expressive name for an integer quantity\n\ttypedef ttmath::Big<TTMATH_BITS(64),TTMATH_BITS(64)> bigfloat;\t// big floating point number\n\ttypedef index node; // node indices are 0-based\n\ttypedef double edgeweight; // edge weight type\n\ttypedef index edgeid;\t// edge id\n\n\t/** Constants **/\n\tconstexpr index none = std::numeric_limits<index>::max(); // value for non-existing nodes/edges\n\tconstexpr edgeweight defaultEdgeWeight = 1.0;\n\tconstexpr edgeweight nullWeight = 0.0;\n}\n\n#ifdef __INTEL_COMPILER\nconstexpr double PI = 3.141592653589793238462643383279502884197169399375105820974944592307816406286;\n#else\nconst double PI = 2.0*std::acos(0);\n#endif\n\n// CODE STYLE GUIDELINES: Do not rely on global variables for algorithm parametrization.\n\n\n#endif /* GLOBALS_H_ */\n" }, { "alpha_fraction": 0.6603773832321167, "alphanum_fraction": 0.6792452931404114, "avg_line_length": 14.142857551574707, "blob_id": "12590d11bd37bb0a845f730f4bb87d9716b472f0", "content_id": "133aee1ccc3893c21018aad070980f9213a56972", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 424, "license_type": "permissive", "max_line_length": 53, "num_lines": 28, "path": "/networkit/cpp/coarsening/test/CoarseningBenchmark.h", "repo_name": "networkitproject/networkit-mirror", "src_encoding": "UTF-8", "text": "/*\n * CoarseningBenchmark.h\n *\n * Created on: 20.12.2012\n * Author: Christian Staudt ([email protected])\n */\n\n#ifndef NOGTEST\n\n#ifndef COARSENINGBENCHMARK_H_\n#define COARSENINGBENCHMARK_H_\n\n#include <gtest/gtest.h>\n\nnamespace NetworKit {\n\n/**\n * googletest test fixture for the coarsening 
module.\n */\nclass CoarseningBenchmark: public testing::Test {\n\n};\n\n\n} /* namespace NetworKit */\n#endif /* COARSENINGBENCHMARK_H_ */\n\n#endif /*NOGTEST */\n" }, { "alpha_fraction": 0.5986207127571106, "alphanum_fraction": 0.6082758903503418, "avg_line_length": 21.65625, "blob_id": "de1322ebd42dbb72466f560a493b68ea913ff057", "content_id": "76f39c63f9a40b9fec06115a91675c17ae01464e", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 2175, "license_type": "permissive", "max_line_length": 98, "num_lines": 96, "path": "/networkit/cpp/graph/DynBFS.cpp", "repo_name": "networkitproject/networkit-mirror", "src_encoding": "UTF-8", "text": "/*\n * DynBFS.cpp\n *\n * Created on: 17.07.2014\n * Author: cls, ebergamini\n */\n\n#include \"BFS.h\"\n#include \"DynBFS.h\"\n#include \"../auxiliary/Log.h\"\n#include <queue>\n\n\nnamespace NetworKit {\n\nDynBFS::DynBFS(const Graph& G, node s, bool storePredecessors) : DynSSSP(G, s, storePredecessors),\ncolor(G.upperNodeIdBound(), WHITE) {\n}\n\nvoid DynBFS::run() {\n\tBFS bfs(G, source, true);\n\tbfs.run();\n\tdistances = bfs.distances;\n\tnpaths = bfs.npaths;\n\tif (storePreds)\n\t\tprevious = bfs.previous;\n\tmaxDistance = 0;\n\tG.forNodes([&](node v){\n\t\tif (distances[v] > maxDistance)\n\t\t\tmaxDistance = distances[v];\n\t});\n\tmaxDistance++;\n}\n\nvoid DynBFS::update(const std::vector<GraphEvent>& batch) {\n\tmod = false;\n\tstd::vector<std::queue<node> > queues(maxDistance);\n\n\t// insert nodes from the batch whose distance has changed (affected nodes) into the queues\n\tfor (GraphEvent edge : batch) {\n\t\tif (edge.type!=GraphEvent::EDGE_ADDITION || edge.w!=1.0)\n\t\t\tthrow std::runtime_error(\"Graph update not allowed\");\n\t\tif (distances[edge.u] >= distances[edge.v]+1) {\n\t\t\tqueues[distances[edge.v]+1].push(edge.u);\n\t\t} else if (distances[edge.v] >= distances[edge.u]+1) {\n\t\t\tqueues[distances[edge.u]+1].push(edge.v);\n\t\t}\n\t}\n\n\t// extract nodes from the queues and scan incident edges\n\tstd::queue<node> visited;\n\tcount m = 1;\n\twhile (m < maxDistance) {\n\t\tDEBUG(\"m = \", m);\n\t\twhile (!queues[m].empty()) {\n\t\t\tmod = true;\n\t\t\tnode w = queues[m].front();\n\t\t\tDEBUG(\"node \", w);\n\t\t\tqueues[m].pop();\n\t\t\tif (color[w] == BLACK) {\n\t\t\t\tcontinue;\n\t\t\t}\n\t\t\tvisited.push(w);\n\t\t\tcolor[w] = BLACK;\n\t\t\tdistances[w] = m;\n\t\t\tif (storePreds) {\n\t\t\t\tprevious[w].clear();\n\t\t\t}\n\t\t\tnpaths[w] = 0;\n\t\t\tG.forInNeighborsOf(w, [&](node w, node z) {\n\t\t\t\t//z is a predecessor for w\n\t\t\t\tif (distances[w] == distances[z]+1) {\n\t\t\t\t\tif (storePreds) {\n\t\t\t\t\t\tprevious[w].push_back(z);\n\t\t\t\t\t}\n\t\t\t\t\tnpaths[w] += npaths[z];\n\t\t\t\t}\n\t\t\t\t//w is a predecessor for z\n\t\t\t\telse if (color[z] == WHITE && distances[z] >= distances[w]+1 ) {\n\t\t\t\t\tcolor[z] = GRAY;\n\t\t\t\t\tqueues[m+1].push(z);\n\t\t\t\t}\n\t\t\t});\n\t\t}\n\t\tm = m+1;\n\t}\n\n\t// reset colors\n\twhile(!visited.empty()) {\n\t\tnode w = visited.front();\n\t\tvisited.pop();\n\t\tcolor[w] = WHITE;\n\t}\n}\n\n} /* namespace NetworKit */\n" }, { "alpha_fraction": 0.6798748970031738, "alphanum_fraction": 0.694473385810852, "avg_line_length": 21.302326202392578, "blob_id": "0295eaa7e6c2ac8ac4eb37b2798397f4d1e8558b", "content_id": "6c09d840ae33303874184f246d33e4586dda1552", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 959, "license_type": "permissive", "max_line_length": 93, 
"num_lines": 43, "path": "/networkit/cpp/centrality/PageRank.h", "repo_name": "networkitproject/networkit-mirror", "src_encoding": "UTF-8", "text": "/*\n * PageRank.h\n *\n * Created on: 19.03.2014\n * Author: Henning\n */\n\n#ifndef PAGERANK_H_\n#define PAGERANK_H_\n\n#include \"Centrality.h\"\n\nnamespace NetworKit {\n\n/**\n * @ingroup centrality\n * Compute PageRank as node centrality measure.\n * NOTE: There is an inconsistency in the definition in Newman's book (Ch. 7) regarding\n * directed graphs; we follow the verbal description, which requires to sum over the incoming\n * edges (as opposed to outgoing ones).\n */\nclass PageRank: public NetworKit::Centrality {\nprotected:\n\tdouble damp;\n\tdouble tol;\n\npublic:\n\t/**\n\t * Constructs the PageRank class for the Graph @a G\n\t *\n\t * @param[in] G Graph to be processed.\n\t * @param[in] damp Damping factor of the PageRank algorithm.\n\t * @param[in] tol Error tolerance for PageRank iteration.\n\t */\n\tPageRank(const Graph& G, double damp=0.85, double tol = 1e-8);\n\n\tvirtual void run();\n\n\tvirtual double maximum();\n};\n\n} /* namespace NetworKit */\n#endif /* PAGERANK_H_ */\n" }, { "alpha_fraction": 0.6782841682434082, "alphanum_fraction": 0.6997318863868713, "avg_line_length": 13.920000076293945, "blob_id": "f1a53d4bb1c2644d50fd37f7434f3a16e5a9aec5", "content_id": "d1aa5ef860811f3204987168ba8a14442f3ab81a", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 373, "license_type": "permissive", "max_line_length": 53, "num_lines": 25, "path": "/networkit/cpp/sparsification/test/MultiscaleBackboneGTest.h", "repo_name": "networkitproject/networkit-mirror", "src_encoding": "UTF-8", "text": "/*\n * MultiscaleBackboneGTest.h\n *\n * Created on: 20.06.2014\n * Author: Gerd Lindner\n */\n\n#ifndef NOGTEST\n\n#ifndef MULTISCALEBACKBONETEST_H_\n#define MULTISCALEBACKBONETEST_H_\n\n#include <gtest/gtest.h>\n\nnamespace NetworKit {\n\nclass MultiscaleBackboneGTest: public testing::Test {\n\n};\n\n\n} /* namespace NetworKit */\n#endif /* MULTISCALEBACKBONETEST_H_ */\n\n#endif /*NOGTEST */\n" }, { "alpha_fraction": 0.5326438546180725, "alphanum_fraction": 0.5378151535987854, "avg_line_length": 29.940000534057617, "blob_id": "c9d641112b420208b4d79fad1ef59821066401a1", "content_id": "743a5ed8e55a3146e0add9ba1cb3ce3bb7e84989", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1547, "license_type": "permissive", "max_line_length": 181, "num_lines": 50, "path": "/networkit/cpp/dynamics/GraphEvent.cpp", "repo_name": "networkitproject/networkit-mirror", "src_encoding": "UTF-8", "text": "/*\n * GraphEvent.cpp\n *\n * Created on: 02.04.2013\n * Author: cls\n */\n\n#include \"GraphEvent.h\"\n\n#include <sstream>\n\nnamespace NetworKit {\n\nGraphEvent::GraphEvent(GraphEvent::Type type, node u, node v, edgeweight w) : type(type), u(u), v(v), w(w) {\n}\n\nstd::string GraphEvent::toString() {\n\tstd::stringstream ss;\n\tif (this->type == GraphEvent::NODE_ADDITION) {\n\t\tss << \"an(\" << u << \")\";\n\t} else if (this->type == GraphEvent::NODE_REMOVAL) {\n\t\tss << \"dn(\" << u << \")\";\n\t} else if (this->type == GraphEvent::NODE_RESTORATION) {\n\t\tss << \"rn(\" << u << \")\";\n\t} else if (this->type == GraphEvent::EDGE_ADDITION) {\n\t\tss << \"ae(\" << u << \",\" << v << \",\" << w << \")\";\n\t} else if (this->type == GraphEvent::EDGE_REMOVAL) {\n\t\tss << \"de(\" << u << \",\" << v << \")\";\n\t} else if (this->type == 
GraphEvent::EDGE_WEIGHT_UPDATE) {\n\t\tss << \"ce(\" << u << \",\" << v << \")\";\n\t} else if (this->type == GraphEvent::EDGE_WEIGHT_INCREMENT) {\n\t\tss << \"ie(\" << u << \",\" << v << \")\";\n\t} else if (this->type == GraphEvent::TIME_STEP) {\n\t\tss << \"st\";\n\t}\n\treturn ss.str();\n}\n\nbool GraphEvent::compare(GraphEvent a, GraphEvent b) {\n\tif (a.type < b.type || (a.type == b.type && a.u < b.u) || (a.type == b.type && a.u == b.u && a.v < b.v) || (a.type == b.type && a.u == b.u && a.v == b.v && a.w < b.w)) return true;\n\telse return false;\n}\n\nbool GraphEvent::equal(GraphEvent a, GraphEvent b) {\n\tif (a.type == GraphEvent::TIME_STEP && b.type == GraphEvent::TIME_STEP) return true;\n\treturn (a.type == b.type && a.u == b.u && a.v == b.v && a.w == b.w);\n}\n\n\n} /* namespace NetworKit */\n" }, { "alpha_fraction": 0.6491769552230835, "alphanum_fraction": 0.6592592597007751, "avg_line_length": 26.851003646850586, "blob_id": "254d7c30498ba3352ad82f4af0cd414b578f6a7a", "content_id": "c55036507bc2eaa8d8e86d0a663f459c798d1c92", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 9720, "license_type": "permissive", "max_line_length": 189, "num_lines": 349, "path": "/networkit/cpp/numerics/LAMG/SolverLamg.cpp", "repo_name": "networkitproject/networkit-mirror", "src_encoding": "UTF-8", "text": "/*\n * SolverLamg.cpp\n *\n * Created on: 12.01.2015\n * Author: Michael\n */\n\n#include \"SolverLamg.h\"\n#include \"LAMGSettings.h\"\n\n#include <fstream>\n#include <iostream>\n#include <sstream>\n\n#include \"../../auxiliary/Enforce.h\"\n#include \"../../auxiliary/Timer.h\"\n\nnamespace NetworKit {\n\n#ifndef NDEBUG\ncount SolverLamg::minResTime = 0;\ncount SolverLamg::interpolationTime = 0;\ncount SolverLamg::restrictionTime = 0;\ncount SolverLamg::coarsestSolve = 0;\n#endif\n\nSolverLamg::SolverLamg(LevelHierarchy &hierarchy, const Smoother &smoother) : hierarchy(hierarchy), smoother(smoother), bStages(hierarchy.size(), std::vector<Vector>()) {\n}\n\nvoid SolverLamg::solve(Vector &x, const Vector &b, LAMGSolverStatus &status) {\n\tbStages = std::vector<std::vector<Vector>>(hierarchy.size(), std::vector<Vector>());\n\tif (hierarchy.size() >= 2) {\n\t\tVector bc = b;\n\t\tVector xc = x;\n\t\tint finest = 0;\n\n\t\tif (hierarchy.getType(1) == ELIMINATION) {\n#ifndef NDEBUG\n\t\t\tAux::Timer t; t.start();\n#endif\n\t\t\thierarchy.at(1).restrict(b, bc, bStages[1]);\n\t\t\tif (hierarchy.at(1).getLaplacian().numberOfRows() == 1) {\n\t\t\t\tx = 0.0;\n\t\t\t\treturn;\n\t\t\t} else {\n\t\t\t\thierarchy.at(1).coarseType(x, xc);\n\t\t\t\tfinest = 1;\n\t\t\t}\n#ifndef NDEBUG\n\t\t\tt.stop();\n\t\t\trestrictionTime += t.elapsedMicroseconds();\n#endif\n\t\t}\n\t\tsolveCycle(xc, bc, finest, status);\n\n\t\tif (finest == 1) { // interpolate from finest == ELIMINATION level back to actual finest level\n#ifndef NDEBUG\n\t\t\tAux::Timer t; t.start();\n#endif\n\t\t\thierarchy.at(1).interpolate(xc, x, bStages[1]);\n#ifndef NDEBUG\n\t\t\tt.stop();\n\t\t\tinterpolationTime += t.elapsedMicroseconds();\n#endif\n\t\t} else {\n\t\t\tx = xc;\n\t\t}\n\t} else {\n\t\tsolveCycle(x, b, 0, status);\n\t}\n\n\tdouble residual = (b - hierarchy.at(0).getLaplacian() * x).length();\n\tstatus.residual = residual;\n#ifndef NDEBUG\n\tDEBUG(\"final residual\\t \", residual);\n\tDEBUG(\"minResTime: \", minResTime / 1000);\n\tDEBUG(\"interpolationTime: \", interpolationTime / 1000);\n\tDEBUG(\"restrictionTime: \", restrictionTime / 1000);\n\tDEBUG(\"coarsestSolve: \", 
coarsestSolve / 1000);\n#endif\n}\n\nvoid SolverLamg::solveCycle(Vector &x, const Vector &b, int finest, LAMGSolverStatus &status) {\n\tAux::Timer timer;\n\ttimer.start();\n\n\t// data structures for iterate recombination\n\thistory = std::vector<std::vector<Vector>>(hierarchy.size());\n\trHistory = std::vector<std::vector<Vector>>(hierarchy.size());\n\tlatestIterate = std::vector<index>(hierarchy.size(), 0);\n\tnumActiveIterates = std::vector<count>(hierarchy.size(), 0);\n\tint coarsest = hierarchy.size() - 1;\n\tstd::vector<count> numVisits(coarsest);\n\tstd::vector<Vector> X(hierarchy.size());\n\tstd::vector<Vector> B(hierarchy.size());\n\n\tfor (index i = 0; i < hierarchy.size(); ++i) {\n\t\thistory[i] = std::vector<Vector>(MAX_COMBINED_ITERATES, Vector(hierarchy.at(i).getNumberOfNodes()));\n\t\trHistory[i] = std::vector<Vector>(MAX_COMBINED_ITERATES, Vector(hierarchy.at(i).getNumberOfNodes()));\n\t}\n\n\tVector r = b - hierarchy.at(finest).getLaplacian() * x;\n\tdouble residual = r.length();\n\tdouble finalResidual = residual * status.desiredResidualReduction;\n\tdouble bestResidual = std::numeric_limits<double>::max();\n\n\tcount iterations = 0;\n\tstatus.residualHistory.emplace_back(residual);\n\tcount noResReduction = 0;\n\twhile (residual > finalResidual && noResReduction < 5 && iterations < status.maxIters && timer.elapsedMilliseconds() <= status.maxConvergenceTime ) {\n#ifndef NDEBUG\n\t\tDEBUG(\"iter \", iterations, \" r=\", residual);\n#endif\n\t\tcycle(x, b, finest, coarsest, numVisits, X, B, status);\n\t\tr = b - hierarchy.at(finest).getLaplacian() * x;\n\t\tresidual = r.length();\n\t\tstatus.residualHistory.emplace_back(residual);\n\t\tif (residual < bestResidual) {\n\t\t\tnoResReduction = 0;\n\t\t\tbestResidual = residual;\n\t\t} else {\n\t\t\t++noResReduction;\n\t\t}\n\t\titerations++;\n\t}\n\n\ttimer.stop();\n\n\tstatus.numIters = iterations;\n\tstatus.residual = r.length();\n\tstatus.converged = r.length() <= finalResidual;\n#ifndef NDEBUG\n\tDEBUG(\"nIter\\t \", iterations);\n#endif\n}\n\nvoid SolverLamg::cycle(Vector &x, const Vector &b, int finest, int coarsest, std::vector<count> &numVisits, std::vector<Vector> &X, std::vector<Vector> &B, const LAMGSolverStatus &status) {\n\tstd::fill(numVisits.begin(), numVisits.end(), 0);\n\tX[finest] = x;\n\tB[finest] = b;\n\n#ifndef NDEBUG\n\tAux::Timer t;\n#endif\n\n\tint currLvl = finest;\n\tint nextLvl = finest;\n\tdouble maxVisits = 0.0;\n\n\tsaveIterate(currLvl, X[currLvl], B[currLvl] - hierarchy.at(currLvl).getLaplacian() * X[currLvl]);\n\twhile (true) {\n\t\tif (currLvl == coarsest) {\n#ifndef NDEBUG\n\t\t\tt.start();\n#endif\n\t\t\tnextLvl = currLvl - 1;\n\t\t\tif (currLvl == finest) { // finest level\n\t\t\t\tX[currLvl] = smoother.relax(hierarchy.at(currLvl).getLaplacian(), B[currLvl], X[currLvl], status.numPreSmoothIters);\n\t\t\t} else {\n\t\t\t\tVector bCoarse(B[currLvl].getDimension()+1, 0.0);\n\t\t\t\tfor (index i = 0; i < B[currLvl].getDimension(); ++i) {\n\t\t\t\t\tbCoarse[i] = B[currLvl][i];\n\t\t\t\t}\n\n\t\t\t\tVector xCoarse = DenseMatrix::LUSolve(hierarchy.getCoarseMatrix(), bCoarse);\n\t\t\t\tfor (index i = 0; i < X[currLvl].getDimension(); ++i) {\n\t\t\t\t\tX[currLvl][i] = xCoarse[i];\n\t\t\t\t}\n\t\t\t}\n#ifndef NDEBUG\n\t\t\tt.stop();\n\t\t\tcoarsestSolve += t.elapsedMicroseconds();\n#endif\n\t\t} else {\n\t\t\tif (currLvl == finest) {\n\t\t\t\tmaxVisits = 1.0;\n\t\t\t} else {\n\t\t\t\tmaxVisits = hierarchy.cycleIndex(currLvl) * numVisits[currLvl-1];\n\t\t\t}\n\n\t\t\tif (numVisits[currLvl] < maxVisits) 
{\n\t\t\t\tnextLvl = currLvl + 1;\n\t\t\t} else {\n\t\t\t\tnextLvl = currLvl - 1;\n\t\t\t}\n\t\t}\n\n\t\tif (nextLvl < finest) break;\n\n\t\tif (nextLvl > currLvl) { // preProcess\n#ifndef NDEBUG\n\t\t\tt.start();\n#endif\n\t\t\tnumVisits[currLvl]++;\n\n\t\t\tif (hierarchy.getType(nextLvl) != ELIMINATION) {\n\t\t\t\tX[currLvl] = smoother.relax(hierarchy.at(currLvl).getLaplacian(), B[currLvl], X[currLvl], status.numPreSmoothIters);\n\t\t\t}\n\n\t\t\tif (hierarchy.getType(nextLvl) == ELIMINATION) {\n\t\t\t\thierarchy.at(nextLvl).restrict(B[currLvl], B[nextLvl], bStages[nextLvl]);\n\t\t\t} else {\n\t\t\t\thierarchy.at(nextLvl).restrict(B[currLvl] - hierarchy.at(currLvl).getLaplacian() * X[currLvl], B[nextLvl]);\n\t\t\t}\n\n\t\t\thierarchy.at(nextLvl).coarseType(X[currLvl], X[nextLvl]);\n\n\t\t\tclearHistory(nextLvl);\n#ifndef NDEBUG\n\t\t\tt.stop();\n\t\t\trestrictionTime += t.elapsedMicroseconds();\n#endif\n\t\t} else { // postProcess\n\t\t\tif (currLvl == coarsest || hierarchy.getType(currLvl+1) != ELIMINATION) {\n\t\t\t\tminRes(currLvl, X[currLvl], B[currLvl] - hierarchy.at(currLvl).getLaplacian() * X[currLvl]);\n\t\t\t}\n\n#ifndef NDEBUG\n\t\t\tt.start();\n#endif\n\n\t\t\tif (nextLvl > finest) {\n\t\t\t\tsaveIterate(nextLvl, X[nextLvl], B[nextLvl] - hierarchy.at(nextLvl).getLaplacian() * X[nextLvl]);\n\t\t\t}\n\n\n\t\t\tif (hierarchy.getType(currLvl) == ELIMINATION) {\n\t\t\t\thierarchy.at(currLvl).interpolate(X[currLvl], X[nextLvl], bStages[currLvl]);\n\t\t\t} else {\n\t\t\t\tVector xf = X[nextLvl];\n\t\t\t\thierarchy.at(currLvl).interpolate(X[currLvl], xf);\n\t\t\t\tX[nextLvl] += xf;\n\t\t\t}\n\n\t\t\tif (hierarchy.getType(currLvl) != ELIMINATION) {\n\t\t\t\tX[nextLvl] = smoother.relax(hierarchy.at(nextLvl).getLaplacian(), B[nextLvl], X[nextLvl], status.numPostSmoothIters);\n\t\t\t}\n\n#ifndef NDEBUG\n\t\t\tt.stop();\n\t\t\tinterpolationTime += t.elapsedMicroseconds();\n#endif\n\t\t}\n\n\t\tcurrLvl = nextLvl;\n\t} // while\n\n\t// post-cycle finest\n\tif ((int64_t) hierarchy.size() > finest + 1 && hierarchy.getType(finest+1) != ELIMINATION) { // do an iterate recombination on calculated solutions\n\t\tminRes(finest, X[finest], B[finest] - hierarchy.at(finest).getLaplacian() * X[finest]);\n\t}\n\n\n\tX[finest] -= X[finest].mean();\n\tx = X[finest];\n}\n\nvoid SolverLamg::saveIterate(index level, const Vector &x, const Vector &r) {\n\t// update latest pointer\n\tindex i = latestIterate[level];\n\tlatestIterate[level] = (i+1) % MAX_COMBINED_ITERATES;\n\n\n\t// update numIterates\n\tif (numActiveIterates[level] < MAX_COMBINED_ITERATES) {\n\t\tnumActiveIterates[level]++;\n\t}\n\n\t// update history array\n\thistory[level][i] = x;\n\trHistory[level][i] = r;\n}\n\nvoid SolverLamg::clearHistory(index level) {\n\tlatestIterate[level] = 0;\n\tnumActiveIterates[level] = 0;\n}\n\nvoid SolverLamg::minRes(index level, Vector &x, const Vector &r) {\n\tif (numActiveIterates[level] > 0) {\n\t\tcount n = numActiveIterates[level];\n\n\t\tstd::vector<index> ARowIdx(r.getDimension()+1);\n\t\tstd::vector<index> ERowIdx(r.getDimension()+1);\n\n#pragma omp parallel for\n\t\tfor (index i = 0; i < r.getDimension(); ++i) {\n\t\t\tfor (index k = 0; k < n; ++k) {\n\t\t\t\tdouble AEvalue = r[i] - rHistory[level][k][i];\n\t\t\t\tif (std::abs(AEvalue) > 1e-9) {\n\t\t\t\t\t++ARowIdx[i+1];\n\t\t\t\t}\n\n\t\t\t\tdouble Eval = history[level][k][i] - x[i];\n\t\t\t\tif (std::abs(Eval) > 1e-9) {\n\t\t\t\t\t++ERowIdx[i+1];\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tfor (index i = 0; i < r.getDimension(); ++i) {\n\t\t\tARowIdx[i+1] += 
ARowIdx[i];\n\t\t\tERowIdx[i+1] += ERowIdx[i];\n\t\t}\n\n\t\tstd::vector<index> AColumnIdx(ARowIdx[r.getDimension()]);\n\t\tstd::vector<double> ANonZeros(ARowIdx[r.getDimension()]);\n\n\t\tstd::vector<index> EColumnIdx(ERowIdx[r.getDimension()]);\n\t\tstd::vector<double> ENonZeros(ERowIdx[r.getDimension()]);\n\n#pragma omp parallel for\n\t\tfor (index i = 0; i < r.getDimension(); ++i) {\n\t\t\tfor (index k = 0, aIdx = ARowIdx[i], eIdx = ERowIdx[i]; k < n; ++k) {\n\t\t\t\tdouble AEvalue = r[i] - rHistory[level][k][i];\n\t\t\t\tif (std::abs(AEvalue) > 1e-9) {\n\t\t\t\t\tAColumnIdx[aIdx] = k;\n\t\t\t\t\tANonZeros[aIdx] = AEvalue;\n\t\t\t\t\t++aIdx;\n\t\t\t\t}\n\n\t\t\t\tdouble Eval = history[level][k][i] - x[i];\n\t\t\t\tif (std::abs(Eval) > 1e-9) {\n\t\t\t\t\tEColumnIdx[eIdx] = k;\n\t\t\t\t\tENonZeros[eIdx] = Eval;\n\t\t\t\t\t++eIdx;\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tCSRMatrix AE(r.getDimension(), n, ARowIdx, AColumnIdx, ANonZeros, true);\n\t\tCSRMatrix E(r.getDimension(), n, ERowIdx, EColumnIdx, ENonZeros, true);\n#ifndef NDEBUG\n\tAux::Timer t;\n\tt.start();\n#endif\n\n\t\tVector alpha = smoother.relax(CSRMatrix::mTmMultiply(AE, AE), CSRMatrix::mTvMultiply(AE, r), Vector(n, 0.0), 10);\n\t\tx += E * alpha;\n\n#ifndef NDEBUG\n\tt.stop();\n\tminResTime += t.elapsedMicroseconds();\n#endif\n\t}\n\n}\n\n} /* namespace NetworKit */\n" }, { "alpha_fraction": 0.6373908519744873, "alphanum_fraction": 0.6430405974388123, "avg_line_length": 23.037036895751953, "blob_id": "abdf68a845ee6de2f3b8d4c4a0c14bdcd1975813", "content_id": "7247abe7155e8227cc1d156c46a878615de56d5e", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1947, "license_type": "permissive", "max_line_length": 146, "num_lines": 81, "path": "/networkit/cpp/graph/Dijkstra.cpp", "repo_name": "networkitproject/networkit-mirror", "src_encoding": "UTF-8", "text": "/*\n * Dijkstra.cpp\n *\n * Created on: Jul 23, 2013\n * Author: Henning, Christian Staudt\n */\n\n#include \"Dijkstra.h\"\n\n#include <algorithm>\n\nnamespace NetworKit {\n\nDijkstra::Dijkstra(const Graph& G, node source, bool storePaths, bool storeStack, node target) : SSSP(G, source, storePaths, storeStack, target) {\n\n}\n\nvoid Dijkstra::run() {\n\n\tTRACE(\"initializing Dijkstra data structures\");\n\t// init distances\n\tedgeweight infDist = std::numeric_limits<edgeweight>::max();\n\tdistances.clear();\n\tdistances.resize(G.upperNodeIdBound(), infDist);\n\tif (storePaths) {\n\t\tprevious.clear();\n\t\tprevious.resize(G.upperNodeIdBound());\n\t\tnpaths.clear();\n\t\tnpaths.resize(G.upperNodeIdBound(), 0);\n\t\tnpaths[source] = 1;\n\t}\n\n\tif (storeStack) {\n\t\tstd::vector<node> empty;\n\t\tstd::swap(stack, empty);\n\t}\n\tdistances[source] = 0;\n\t// priority queue with distance-node pairs\n\tAux::PrioQueue<edgeweight, node> pq(distances);\n\n\n\tauto relax([&](node u, node v, edgeweight w) {\n\t\tif (distances[v] > distances[u] + w) {\n\t\t\tdistances[v] = distances[u] + w;\n\t\t\tif (storePaths) {\n\t\t\t\tprevious[v] = {u}; // new predecessor on shortest path\n\t\t\t\tnpaths[v] = npaths[u];\n\t\t\t}\n\t\t\tTRACE(\"Decreasing key of \", v);\n\t\t\tTRACE(\"pq size: \", pq.size());\n\t\t\tpq.decreaseKey(distances[v], v);\n\t\t\tTRACE(\"pq size: \", pq.size());\n\t\t} else if (storePaths && (distances[v] == distances[u] + w)) {\n\t\t\tprevious[v].push_back(u); \t// additional predecessor\n\t\t\tnpaths[v] += npaths[u]; \t// all the shortest paths to u are also shortest paths to v 
now\n\t\t}\n\t});\n\n\tbool breakWhenFound = (target != none);\n\tTRACE(\"traversing graph\");\n\twhile (pq.size() > 0) {\n\t\tTRACE(\"pq size: \", pq.size());\n\t\tnode current = pq.extractMin().second;\n\t\tTRACE(\"current node in Dijkstra: \" , current);\n\t\tTRACE(\"pq size: \", pq.size());\n\t\tif (breakWhenFound && target == current) {\n\t\t\tbreak;\n\t\t}\n\n\t\tif (storeStack) {\n\t\t\tstack.push_back(current);\n\t\t}\n\n\t\tG.forEdgesOf(current, relax);\n\t}\n\n}\n\n\n} /* namespace NetworKit */\n" }, { "alpha_fraction": 0.6533254981040955, "alphanum_fraction": 0.6603884696960449, "avg_line_length": 21.65333366394043, "blob_id": "ff0b0678ecc29e7e7ab06bedfc47710854821d47", "content_id": "4294c8ada3af35c85352c42a270f047fa46b9324", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 1700, "license_type": "permissive", "max_line_length": 224, "num_lines": 75, "path": "/Doc/doc/credits.rst", "repo_name": "networkitproject/networkit-mirror", "src_encoding": "UTF-8", "text": ".. role:: hidden\n :class: hidden\n\n=======\nCredits\n=======\n\n:hidden:`HiddenBiggerHeadingFont`\n---------------------------------\n\nHistory\n~~~~~~~\n\nNetworKit is maintained by the `Research Group Parallel Computing <http://parco.iti.kit.edu>`_ of the Institute of Theoretical Informatics at `Karlsruhe Institute of Technology (KIT) <http://www.kit.edu/english/index.php>`_.\nIt was seeded by the project `Parallel analysis of dynamic networks -- Algorithm engineering of efficient combinatorial and numerical methods <http://parco.iti.kit.edu/forschung-en.shtml>`_, for which we acknowledge\nfinancial support by MWK Baden-Württemberg. In this project Henning Meyerhenke was the principal investigator and Christian L. Staudt was the main PhD student. Since NetworKit's start in 2013, it has grown well\nbeyond the original MWK project and our group, with contributors and users from all over the world.\n\n\nPrincipal Investigator\n~~~~~~~~~~~~~~~~~~~~~~\n\n- Henning Meyerhenke\n\n\nMaintainers\n~~~~~~~~~~~\n\n- Elisabetta Bergamini (since March 2016)\n- Maximilian Vogel\n- Christian L. Staudt (until March 2016)\n\n\nContributors\n~~~~~~~~~~~~\n\n- Lukas Barth\n- Miriam Beddig\n- Elisabetta Bergamini\n- Stefan Bertsch\n- Pratistha Bhattarai\n- Andreas Bilke\n- Simon Bischof\n- Guido Brückner\n- Kolja Esders\n- Patrick Flick\n- Michael Hamann\n- Lukas Hartmann\n- Daniel Hoske\n- Gerd Lindner\n- Moritz v. Looz\n- Yassine Marrakchi\n- Mustafa Özdayi\n- Marcel Radermacher\n- Klara Reichard\n- Marvin Ritter\n- Aleksejs Sazonovs\n- Arie Slobbe\n- Florian Weber\n- Michael Wegner\n- Jörg Weisbarth\n\n\nExternal Code\n~~~~~~~~~~~~~\n\nThe program source includes:\n\n- `The Lean Mean C++ Option Parser <http://optionparser.sourceforge.net/>`_ by Matthias S. Benkmann.\n- `TTMath bignum library <http://www.ttmath.org/>`_ by Tomasz Sowa\n\nLicense\n~~~~~~~\n\nThe source code of this program is released under the `MIT License <http://opensource.org/licenses/MIT>`_. We ask you to cite us if you use this code in your project. 
Feedback is also welcome.\n" }, { "alpha_fraction": 0.6871859431266785, "alphanum_fraction": 0.697236180305481, "avg_line_length": 22.41176414489746, "blob_id": "43f034661e2ba131a25540df95890720f61a1319", "content_id": "3b4c8e3509587c2dc0bbf2ba2965dc58280af76e", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 796, "license_type": "permissive", "max_line_length": 102, "num_lines": 34, "path": "/networkit/cpp/numerics/test/LAMGGTest.h", "repo_name": "networkitproject/networkit-mirror", "src_encoding": "UTF-8", "text": "/*\n * LAMGGTest.h\n *\n * Created on: 20.11.2014\n * Author: Michael\n */\n\n#ifndef LAMGGTEST_H_\n#define LAMGGTEST_H_\n\n#include \"gtest/gtest.h\"\n\n#include \"../../algebraic/Vector.h\"\n#include \"../../io/METISGraphReader.h\"\n#include \"../../io/METISGraphWriter.h\"\n#include \"../../generators/BarabasiAlbertGenerator.h\"\n#include \"../../components/ConnectedComponents.h\"\n#include \"../../structures/Partition.h\"\n\nusing namespace std;\n\nnamespace NetworKit {\n\nclass LAMGGTest : public testing::Test {\nprotected:\n\tconst vector<string> GRAPH_INSTANCES = {\"input/jazz.graph\", \"input/power.graph\", \"input/wing.graph\"};\n\n\tVector randZeroSum(const Graph &graph, size_t seed) const;\n\tVector randVector(count dimension, double lower, double upper) const;\n};\n\n} /* namespace NetworKit */\n\n#endif /* LAMGGTEST_H_ */\n" }, { "alpha_fraction": 0.7686520218849182, "alphanum_fraction": 0.7699059844017029, "avg_line_length": 29.09433937072754, "blob_id": "5f24c42271f5c8bd3736413c35ab92ae6e317781", "content_id": "c90bf314330ee22b7d719ebf79c33936d3c2d67e", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 1595, "license_type": "permissive", "max_line_length": 120, "num_lines": 53, "path": "/Doc/doc/make_www.sh", "repo_name": "networkitproject/networkit-mirror", "src_encoding": "UTF-8", "text": "#!/bin/bash\n#\n# Use this script to create the website for NetworKit\n\nrm -rf ../Website\n\n# Do some preparations in rst files prior to the build.\npython3 -c'import sphinxPreparation; sphinxPreparation.prepareBuild()'\n\n# Call sphinx to generate the website. 
This includes a build of NetworKit as python module to get the latest docstrings.\nmake html-networkit\n\n# sphinx is run twice to make sure references are used correctly\nmake html\n\n# Call doxygen to produce the C++ documentation.\ndoxygen Doxyfile\n\n# Clean up the modifications in the rst files modified during the preparation.\npython3 -c'import sphinxPreparation; sphinxPreparation.cleanUp()'\nrm -rf __pycache__\n\n# move html one up\nmv ../Website/html/* ../Website/\nrm -rd ../Website/html\n\n# copy userguides from Notebooks/\ncp ../Notebooks/NetworKit_UserGuide.ipynb ../uploads/docs/NetworKit_UserGuide.ipynb\ncp ../Notebooks/GephiStreaming_UserGuide.ipynb ../uploads/docs/GephiStreaming_UserGuide.ipynb\ncp ../Notebooks/SpectralCentrality.ipynb ../uploads/docs/SpectralCentrality.ipynb\ncp ../Notebooks/SpectralCentralityWithPandas.ipynb ../uploads/docs/SpectralCentralityWithPandas.ipynb\n\n\n# create uploads folder in ../Website\nmkdir ../Website/uploads/\n\n# create documentation\n./make_doc.sh\n\n# zip documentation and repository and move it to uploads/\nzip -r ../Website/uploads/Documentation.zip ../Documentation/\nhg archive -t zip ../Website/uploads/NetworKit.zip\n\n# remove doctrees (not needed for html)\nrm -rf ../Website/doctrees/\n\n# copy uploads folder \ncp -a ../uploads ../Website/\n\n\n\necho \"\"\necho \"Finished building Website to ../Website\"\n" }, { "alpha_fraction": 0.704273521900177, "alphanum_fraction": 0.7128205299377441, "avg_line_length": 25, "blob_id": "8214b70060cafe21c4220ff228171e91b1b6fdbc", "content_id": "62fd83741f408de4486020151be579858398939d", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1170, "license_type": "permissive", "max_line_length": 115, "num_lines": 45, "path": "/networkit/cpp/centrality/DegreeCentrality.h", "repo_name": "networkitproject/networkit-mirror", "src_encoding": "UTF-8", "text": "/*\n * DegreeCentrality.h\n *\n * Created on: 19.02.2014\n * Author: cls\n */\n\n#ifndef DEGREECENTRALITY_H_\n#define DEGREECENTRALITY_H_\n\n#include \"Centrality.h\"\n\nnamespace NetworKit {\n\n/**\n * @ingroup centrality\n * Node centrality index which ranks nodes by their degree.\n * Optional normalization by maximum degree.\n */\nclass DegreeCentrality: public NetworKit::Centrality {\npublic:\n\t/**\n\t * Constructs the DegreeCentrality class for the given Graph @a G. If the centrality scores should be normalized,\n\t * then set @a normalized to <code>true</code>. 
The run() method runs in O(n) time, where n is the number of\n\t * nodes in the graph.\n\t *\n\t * @param G The graph.\n\t * @param normalized Set this parameter to <code>true</code> if scores should be normalized in the interval [0,1].\n\t */\n\tDegreeCentrality(const Graph& G, bool normalized=false, bool outDeg=true, bool ignoreSelfLoops=true);\n\n\tvoid run() override;\n\n\t/**\n\t * @return the theoretical maximum degree centrality, which is $n$ (including the possibility of a self-loop)\n\t */\n\tdouble maximum() override;\n\nprivate:\n\tbool outDeg, ignoreSelfLoops;\n};\n\n} /* namespace NetworKit */\n\n#endif /* DEGREECENTRALITY_H_ */\n" }, { "alpha_fraction": 0.6204379796981812, "alphanum_fraction": 0.6496350169181824, "avg_line_length": 12.699999809265137, "blob_id": "98bf0da45aee26a7b87b9ec5754bc5dbd26aa63c", "content_id": "f817b9179b4a6de8f31b132e03914a0fb986114d", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 274, "license_type": "permissive", "max_line_length": 39, "num_lines": 20, "path": "/networkit/cpp/graph/test/SSSPGTest.h", "repo_name": "networkitproject/networkit-mirror", "src_encoding": "UTF-8", "text": "/*\n * SSSPGTest.h\n *\n * Created on: 21.07.2014\n * Author: ebergamini\n */\n\n#ifndef SSSPGTEST_H_\n#define SSSPGTEST_H_\n\n#include <gtest/gtest.h>\n\nnamespace NetworKit {\n\nclass SSSPGTest: public testing::Test {\n};\n\n} /* namespace NetworKit */\n\n#endif /* SSSPGTEST_H_ */\n" }, { "alpha_fraction": 0.6524389982223511, "alphanum_fraction": 0.6686992049217224, "avg_line_length": 16.571428298950195, "blob_id": "30b18e2ecdcbc61638418084bcfdaecc436d6c80", "content_id": "562a9ff8ffda26e74d4df4729d571c90c7ea6508", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 492, "license_type": "permissive", "max_line_length": 85, "num_lines": 28, "path": "/networkit/cpp/sparsification/test/LocalDegreeGTest.h", "repo_name": "networkitproject/networkit-mirror", "src_encoding": "UTF-8", "text": "/*\n * LocalDegreeGTest.h\n *\n * Created on: 24.03.2015\n * Author: Gerd Lindner\n */\n\n#ifndef NOGTEST\n\n#ifndef LOCALDEGREETEST_H_\n#define LOCALDEGREETEST_H_\n\n#include <gtest/gtest.h>\n#include \"../../Globals.h\"\n#include \"../../graph/Graph.h\"\n\nnamespace NetworKit {\n\nclass LocalDegreeGTest: public testing::Test {\nprotected:\n    static double getScore(const Graph& g, node x, node y, count rankX, count rankY);\n};\n\n\n} /* namespace NetworKit */\n#endif /* LOCALDEGREETEST_H_ */\n\n#endif /*NOGTEST */\n" }, { "alpha_fraction": 0.6666666865348816, "alphanum_fraction": 0.6846307516098022, "avg_line_length": 14.181818008422852, "blob_id": "ff95e962335e028376a50c53b0c883e132791c32", "content_id": "23140fd2838f601b8c48c076f6913981621a6780", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 501, "license_type": "permissive", "max_line_length": 42, "num_lines": 33, "path": "/networkit/cpp/spanning/RandomSpanningTree.h", "repo_name": "networkitproject/networkit-mirror", "src_encoding": "UTF-8", "text": "/*\n * RandomSpanningTree.h\n *\n * Created on: 20.06.2015\n * Author: Henning\n */\n\n#ifndef RANDOMSPANNINGTREE_H_\n#define RANDOMSPANNINGTREE_H_\n\n#include \"../graph/Graph.h\"\n\nnamespace NetworKit {\n\nclass RandomSpanningTree {\npublic:\n\tRandomSpanningTree(const Graph& G);\n\tvirtual ~RandomSpanningTree();\n\n\tvoid run();\n\n\tvoid run2();\n\n\tGraph getTree();\n\nprivate:\n\tconst Graph& 
g;\n\tGraph tree;\n\tstd::vector<std::pair<node, node>> edges;\n};\n\n} /* namespace NetworKit */\n#endif /* RANDOMSPANNINGTREE_H_ */\n" }, { "alpha_fraction": 0.6435760855674744, "alphanum_fraction": 0.6489046812057495, "avg_line_length": 20.112499237060547, "blob_id": "5de7b824d9f1580a3ca4d47d980f387cf4283022", "content_id": "39793b0c151333094e3a14e82ed2fb8d7f99eefc", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1689, "license_type": "permissive", "max_line_length": 81, "num_lines": 80, "path": "/networkit/cpp/components/StronglyConnectedComponents.cpp", "repo_name": "networkitproject/networkit-mirror", "src_encoding": "UTF-8", "text": "/*\n * StrongConnectedComponents.cpp\n *\n * Created on: 01.06.2014\n * Author: Klara Reichard ([email protected]), Marvin Ritter ([email protected])\n */\n\n#include <stack>\n#include <functional>\n\n#include \"StronglyConnectedComponents.h\"\n#include \"../structures/Partition.h\"\n#include \"../auxiliary/Log.h\"\n\nnamespace NetworKit {\n\nStronglyConnectedComponents::StronglyConnectedComponents(const Graph& G) : G(G) {\n\n}\n\nvoid StronglyConnectedComponents::run() {\n\tcount z = G.upperNodeIdBound();\n\tcomponent = Partition(z);\n\n\tindex nextIndex = 0;\n\tstd::vector<index> nodeIndex(z, none);\n\tstd::vector<index> nodeLowLink(z, none);\n\tstd::stack<node> stx;\n\tstd::vector<bool> onStack(z, false);\n\n\tstd::function<void(node)> strongConnect = [&](node v) {\n\t\tnodeIndex[v] = nextIndex++;\n\t\tnodeLowLink[v] = nodeIndex[v];\n\t\tstx.push(v);\n\t\tonStack[v] = true;\n\n\t\tG.forNeighborsOf(v, [&](node w) {\n\t\t\tif (nodeIndex[w] == none) {\n\t\t\t\tstrongConnect(w);\n\t\t\t\tnodeLowLink[v] = std::min(nodeLowLink[v], nodeLowLink[w]);\n\t\t\t} else if (onStack[w]) {\n\t\t\t\tnodeLowLink[v] = std::min(nodeLowLink[v], nodeIndex[w]);\n\t\t\t}\n\t\t});\n\n\t\tif (nodeLowLink[v] == nodeIndex[v]) {\n\t\t\tcomponent.toSingleton(v);\n\t\t\twhile (true) {\n\t\t\t\tnode w = stx.top();\n\t\t\t\tstx.pop();\n\t\t\t\tonStack[w] = false;\n\t\t\t\tif (w == v) {\n\t\t\t\t\tbreak;\n\t\t\t\t}\n\t\t\t\tcomponent[w] = component[v];\n\t\t\t}\n\t\t}\n\t};\n\n\tG.forNodes([&](node v) {\n\t\tif (nodeIndex[v] == none) {\n\t\t\tstrongConnect(v);\n\t\t}\n\t});\n}\n\nPartition StronglyConnectedComponents::getPartition() {\n\treturn this->component;\n}\n\ncount StronglyConnectedComponents::numberOfComponents() {\n\treturn this->component.numberOfSubsets();\n}\n\ncount StronglyConnectedComponents::componentOfNode(node u) {\n\tassert (component[u] != none);\n\treturn component[u];\n}\n\n}\n" }, { "alpha_fraction": 0.662243664264679, "alphanum_fraction": 0.6690791845321655, "avg_line_length": 25.457447052001953, "blob_id": "b5310a9ebc879c19d7cfd041441890d348731725", "content_id": "219c5de538ae431a40208312cd7e11dfa57898f4", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 2487, "license_type": "permissive", "max_line_length": 118, "num_lines": 94, "path": "/networkit/cpp/graph/DynDijkstra.cpp", "repo_name": "networkitproject/networkit-mirror", "src_encoding": "UTF-8", "text": "/*\n * DynDijkstra.cpp\n *\n * Created on: 21.07.2014\n * Author: ebergamini\n */\n\n#include \"Dijkstra.h\"\n#include \"DynDijkstra.h\"\n#include \"../auxiliary/Log.h\"\n#include \"../auxiliary/PrioQueue.h\"\n#include \"../auxiliary/NumericTools.h\"\n#include <queue>\n\n\nnamespace NetworKit {\n\nDynDijkstra::DynDijkstra(const Graph& G, node source, bool storePredecessors) : 
DynSSSP(G, source, storePredecessors),\ncolor(G.upperNodeIdBound(), WHITE) {\n\n}\n\nvoid DynDijkstra::run() {\n\tDijkstra dij(G, source, true);\n\tdij.run();\n\tdistances = dij.distances;\n\tnpaths = dij.npaths;\n\tif (storePreds) {\n\t\tprevious = dij.previous;\n\t}\n}\n\nvoid DynDijkstra::update(const std::vector<GraphEvent>& batch) {\n\tmod = false;\n\t// priority queue with distance-node pairs\n\tAux::PrioQueue<edgeweight, node> Q(G.upperNodeIdBound());\n\t// queue with all visited nodes\n\tstd::queue<node> visited;\n\t// if u has a new shortest path going through v, it updates the distance of u\n\t// and inserts u in the priority queue (or updates its priority, if already in Q)\n\tauto updateQueue = [&](node u, node v, edgeweight w) {\n\t\tif (distances[u] >= distances[v]+w) {\n\t\t\tdistances[u] = distances[v]+w;\n\t\t\tif (color[u] == WHITE) {\n\t\t\t\tQ.insert(distances[u], u);\n\t\t\t\tcolor[u] = BLACK;\n\t\t\t} else {\n\t\t\t\tQ.decreaseKey(distances[u], u);\n\t\t\t}\n\t\t}\n\t};\n\n\tfor (GraphEvent edge : batch) {\n\t\tif (edge.type!=GraphEvent::EDGE_ADDITION && edge.type!=GraphEvent::EDGE_WEIGHT_UPDATE)\n\t\t\tthrow std::runtime_error(\"Graph update not allowed\");\n\t\t//TODO: discuss with Christian whether you can substitute weight_update with weight_increase/weight_decrease;\n\t\t// otherwise, it is not possible to check whether the change in the weight is positive or negative\n\t\tupdateQueue(edge.u, edge.v, edge.w);\n\t\tupdateQueue(edge.v, edge.u, edge.w);\n\t}\n\n\twhile(Q.size() != 0) {\n\t\tmod = true;\n\t\tnode current = Q.extractMin().second;\n\t\tvisited.push(current);\n\t\tif (storePreds) {\n\t\t\tprevious[current].clear();\n\t\t}\n\t\tnpaths[current] = 0;\n\t\tG.forInNeighborsOf(current, [&](node current, node z, edgeweight w){\n\t\t\t//z is a predecessor of current node\n\t\t\tif (Aux::NumericTools::equal(distances[current], distances[z]+w, 0.000001)) {\n\t\t\t\tif (storePreds) {\n\t\t\t\t\tprevious[current].push_back(z);\n\t\t\t\t}\n\t\t\t\tnpaths[current] += npaths[z];\n\t\t\t}\n\t\t\t//check whether the current node is a predecessor of z\n\t\t\telse {\n\t\t\t\tupdateQueue(z, current, w);\n\t\t\t}\n\t\t});\n\t}\n\n\t// reset colors\n\twhile(!visited.empty()) {\n\t\tnode w = visited.front();\n\t\tvisited.pop();\n\t\tcolor[w] = WHITE;\n\t}\n\n}\n\n} /* namespace NetworKit */\n" }, { "alpha_fraction": 0.6448979377746582, "alphanum_fraction": 0.6612244844436646, "avg_line_length": 15.333333015441895, "blob_id": "575193d0f54d520dc3918cd9300e3f35a90d8758", "content_id": "090f7f15f7f41f82e3ed666ef2464c610ec79c9a", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 490, "license_type": "permissive", "max_line_length": 47, "num_lines": 30, "path": "/networkit/cpp/algebraic/AdjacencyMatrix.h", "repo_name": "networkitproject/networkit-mirror", "src_encoding": "UTF-8", "text": "/*\n * AdjacencyMatrix.h\n *\n * Created on: 28.03.2014\n * Author: Michael Wegner ([email protected])\n */\n\n#ifndef ADJACENCYMATRIX_H_\n#define ADJACENCYMATRIX_H_\n\n#include \"../graph/Graph.h\"\n#include \"Matrix.h\"\n\nnamespace NetworKit {\n\n/**\n * @ingroup algebraic\n * Adjacency matrix of a Graph.\n */\nclass AdjacencyMatrix : public Matrix {\npublic:\n\t/**\n\t * Constructs the AdjacencyMatrix of @a graph.\n\t */\n\tAdjacencyMatrix(const Graph &graph);\n};\n\n} /* namespace NetworKit */\n\n#endif /* ADJACENCYMATRIX_H_ */\n" }, { "alpha_fraction": 0.646258533000946, "alphanum_fraction": 0.6734693646430969, "avg_line_length": 
13.699999809265137, "blob_id": "203c3b29ef6ac56194de788915f1a022b2befbcc", "content_id": "3bfa7ecac81a1b259d3eee2d4340a19746d4b97e", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 294, "license_type": "permissive", "max_line_length": 45, "num_lines": 20, "path": "/networkit/cpp/centrality/test/CentralityGTest.h", "repo_name": "networkitproject/networkit-mirror", "src_encoding": "UTF-8", "text": "/*\n * CentralityGTest.h\n *\n * Created on: 19.02.2014\n * Author: cls\n */\n\n#ifndef CENTRALITYGTEST_H_\n#define CENTRALITYGTEST_H_\n\n#include <gtest/gtest.h>\n\nnamespace NetworKit {\n\nclass CentralityGTest: public testing::Test {\n};\n\n} /* namespace NetworKit */\n\n#endif /* CENTRALITYGTEST_H_ */\n" }, { "alpha_fraction": 0.7428115010261536, "alphanum_fraction": 0.7523961663246155, "avg_line_length": 20.586206436157227, "blob_id": "4f788d340028e02470c1f58b88add3baee478c97", "content_id": "7d7f20499f97a5f2d5c5378bd36d74e80ff92e3d", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 626, "license_type": "permissive", "max_line_length": 70, "num_lines": 29, "path": "/networkit/cpp/distance/test/CommuteTimeDistanceGTest.h", "repo_name": "networkitproject/networkit-mirror", "src_encoding": "UTF-8", "text": "/*\n * CommuteTimeDistanceGTest.h\n *\n * Created on: Jan 17, 2016\n * Author: Michael\n */\n\n#ifndef NETWORKIT_CPP_CENTRALITY_TEST_COMMUTETIMEDISTANCEGTEST_H_\n#define NETWORKIT_CPP_CENTRALITY_TEST_COMMUTETIMEDISTANCEGTEST_H_\n\n#include \"gtest/gtest.h\"\n#include \"../CommuteTimeDistance.h\"\n\n#include <vector>\n#include <string>\n\nnamespace NetworKit {\n\nusing namespace std;\n\nclass CommuteTimeDistanceGTest : public testing::Test {\npublic:\n\tCommuteTimeDistanceGTest() = default;\n\tvirtual ~CommuteTimeDistanceGTest() = default;\n};\n\n} /* namespace NetworKit */\n\n#endif /* NETWORKIT_CPP_CENTRALITY_TEST_COMMUTETIMEDISTANCEGTEST_H_ */\n" }, { "alpha_fraction": 0.6551433205604553, "alphanum_fraction": 0.6854974627494812, "avg_line_length": 20.962963104248047, "blob_id": "d97a23109995d4e9059ace24ed0ad4bcf2f4ba29", "content_id": "49108738d56a88bfdbdf109c37988052e398dc12", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1186, "license_type": "permissive", "max_line_length": 64, "num_lines": 54, "path": "/networkit/cpp/dynamics/test/DynamicsGTest.cpp", "repo_name": "networkitproject/networkit-mirror", "src_encoding": "UTF-8", "text": "/*\n * DynamicsGTest.cpp\n *\n * Created on: 24.12.2013\n * Author: cls\n */\n\n#include \"DynamicsGTest.h\"\n\n#include \"../DGSStreamParser.h\"\n#include \"../../auxiliary/Log.h\"\n#include \"../GraphEvent.h\"\n#include \"../GraphUpdater.h\"\n\nnamespace NetworKit {\n\nTEST_F(DynamicsGTest, testDGSStreamParser) {\n\tDGSStreamParser parser(\"input/example2.dgs\");\n\tauto stream = parser.getStream();\n\tfor (auto event : stream) {\n\t\tINFO(event.toString(), \" \");\n\t}\n\tINFO(\"\\n\");\n}\n\n\nTEST_F(DynamicsGTest, tryDGSStreamParserOnRealGraph) {\n\tstd::string path;\n\tstd::cout << \"enter .dgs file path: \";\n\tstd::cin >> path;\n\tDGSStreamParser parser(path);\n\tauto stream = parser.getStream();\n}\n\nTEST_F(DynamicsGTest, testGraphEventIncrement) {\n\tGraph G(2, true, false); //undirected\n\tGraph H(2, true, true); //directed\n\tG.addEdge(0, 1, 3.14);\n\tH.addEdge(0, 1, 3.14);\n\tGraphEvent event(GraphEvent::EDGE_WEIGHT_INCREMENT, 0, 1, 
2.1);\n\tstd::vector<GraphEvent> eventstream;\n\teventstream.push_back(event);\n\tGraphUpdater Gupdater(G);\n\tGraphUpdater Hupdater(H);\n\tGupdater.update(eventstream);\n\tHupdater.update(eventstream);\n\tEXPECT_EQ(G.weight(0,1), 5.24);\n\tEXPECT_EQ(H.weight(0,1), 5.24);\n}\n\n} /* namespace NetworKit */\n" }, { "alpha_fraction": 0.6527153849601746, "alphanum_fraction": 0.6635361313819885, "avg_line_length": 28.154762268066406, "blob_id": "b2c189e10482af4a7003804a5b0d3707c8dc24f4", "content_id": "93e14234bab26a8406e4ed7aa90f60f765eb054a", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 4898, "license_type": "permissive", "max_line_length": 147, "num_lines": 168, "path": "/networkit/cpp/algebraic/DenseMatrix.cpp", "repo_name": "networkitproject/networkit-mirror", "src_encoding": "UTF-8", "text": "/*\n * DenseMatrix.cpp\n *\n * Created on: Nov 25, 2015\n * Author: Michael Wegner ([email protected])\n */\n\n#include \"DenseMatrix.h\"\n\nnamespace NetworKit {\n\nDenseMatrix::DenseMatrix() : nRows(0), nCols(0), entries(std::vector<double>(0)) {\n}\n\nDenseMatrix::DenseMatrix(const count nRows, const count nCols, const std::vector<double> &entries) : nRows(nRows), nCols(nCols), entries(entries) {\n\tassert(entries.size() == nRows * nCols);\n}\n\ndouble DenseMatrix::operator()(const index i, const index j) const {\n\treturn entries[i * numberOfColumns() + j];\n}\n\nvoid DenseMatrix::setValue(const index i, const index j, const double value) {\n\tentries[i * numberOfColumns() + j] = value;\n}\n\nVector DenseMatrix::row(const index i) const {\n\tVector row(numberOfColumns(), 0.0, true);\n\tindex offset = i * numberOfColumns();\n#pragma omp parallel for\n\tfor (index j = 0; j < numberOfColumns(); ++j) {\n\t\trow[j] = entries[offset + j];\n\t}\n\n\treturn row;\n}\n\nVector DenseMatrix::column(const index j) const {\n\tVector column(numberOfRows(), 0.0);\n#pragma omp parallel for\n\tfor (index i = 0; i < numberOfRows(); ++i) {\n\t\tcolumn[i] = entries[i * numberOfColumns() + j];\n\t}\n\n\treturn column;\n}\n\nVector DenseMatrix::diagonal() const {\n\tVector diagonal(std::min(numberOfRows(), numberOfColumns()), 0.0);\n#pragma omp parallel for\n\tfor (index i = 0; i < diagonal.getDimension(); ++i) {\n\t\tdiagonal[i] = (*this)(i,i);\n\t}\n\n\treturn diagonal;\n}\n\nDenseMatrix DenseMatrix::operator+(const DenseMatrix &other) const {\n\tassert(numberOfRows() == other.numberOfRows() && numberOfColumns() == other.numberOfColumns());\n\treturn DenseMatrix::binaryOperator(*this, other, [](double val1, double val2){return val1 + val2;});\n}\n\nDenseMatrix& DenseMatrix::operator+=(const DenseMatrix &other) {\n\tassert(numberOfRows() == other.numberOfRows() && numberOfColumns() == other.numberOfColumns());\n\t*this = DenseMatrix::binaryOperator(*this, other, [](double val1, double val2){return val1 + val2;});\n\treturn *this;\n}\n\nDenseMatrix DenseMatrix::operator-(const DenseMatrix &other) const {\n\tassert(numberOfRows() == other.numberOfRows() && numberOfColumns() == other.numberOfColumns());\n\treturn DenseMatrix::binaryOperator(*this, other, [](double val1, double val2){return val1 - val2;});\n}\n\nDenseMatrix& DenseMatrix::operator-=(const DenseMatrix &other) {\n\tassert(numberOfRows() == other.numberOfRows() && numberOfColumns() == other.numberOfColumns());\n\t*this = DenseMatrix::binaryOperator(*this, other, [](double val1, double val2){return val1 - val2;});\n\treturn *this;\n}\n\n
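// Usage sketch (an editor's illustration, not part of the original source): the\n// LU routines defined further below solve a square system A*x = b in place.\n//   DenseMatrix A(2, 2, {4.0, 3.0, 6.0, 3.0});\n//   Vector b(2, 0.0); b[0] = 10.0; b[1] = 12.0;\n//   DenseMatrix::LUDecomposition(A);       // A now holds its LU factors\n//   Vector x = DenseMatrix::LUSolve(A, b); // yields x = (1, 2)\n\n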
DenseMatrix DenseMatrix::operator*(const double &scalar) const {\n\treturn DenseMatrix(*this) *= scalar;\n}\n\nDenseMatrix& DenseMatrix::operator*=(const double &scalar) {\n#pragma omp parallel for\n\tfor (index k = 0; k < entries.size(); ++k) {\n\t\tentries[k] *= scalar;\n\t}\n\n\treturn *this;\n}\n\nVector DenseMatrix::operator*(const Vector &vector) const {\n\tassert(!vector.isTransposed());\n\tassert(numberOfColumns() == vector.getDimension());\n\n\tVector result(numberOfRows(), 0.0);\n#pragma omp parallel for\n\tfor (index i = 0; i < numberOfRows(); ++i) {\n\t\tindex offset = i * numberOfColumns();\n\t\tfor (index k = offset, j = 0; k < offset + numberOfColumns(); ++k, ++j) {\n\t\t\tresult[i] += entries[k] * vector[j];\n\t\t}\n\t}\n\n\treturn result;\n}\n\nDenseMatrix DenseMatrix::operator*(const DenseMatrix &other) const {\n\tassert(numberOfColumns() == other.numberOfRows());\n\tstd::vector<double> resultEntries(numberOfRows() * other.numberOfColumns());\n\n#pragma omp parallel for\n\tfor (index i = 0; i < numberOfRows(); ++i) {\n\t\tindex offset = i * other.numberOfColumns();\n\t\tfor (index k = 0; k < numberOfColumns(); ++k) {\n\t\t\tdouble val_i_k = (*this)(i,k);\n\t\t\tfor (index j = 0; j < other.numberOfColumns(); ++j) {\n\t\t\t\tresultEntries[offset + j] += val_i_k * other(k,j);\n\t\t\t}\n\t\t}\n\t}\n\n\treturn DenseMatrix(numberOfRows(), other.numberOfColumns(), resultEntries);\n}\n\nDenseMatrix DenseMatrix::operator/(const double &divisor) const {\n\treturn DenseMatrix(*this) /= divisor;\n}\n\nDenseMatrix& DenseMatrix::operator/=(const double &divisor) {\n\treturn *this *= 1.0 / divisor;\n}\n\nvoid DenseMatrix::LUDecomposition(DenseMatrix &matrix) {\n\tassert(matrix.numberOfRows() == matrix.numberOfColumns());\n\tfor (index k = 0; k < matrix.numberOfRows()-1; ++k) {\n\t\tfor (index i = k+1; i < matrix.numberOfRows(); ++i) {\n\t\t\tmatrix.setValue(i, k, matrix(i,k) / matrix(k,k));\n\t\t\tfor (index j = k+1; j < matrix.numberOfRows(); ++j) {\n\t\t\t\tmatrix.setValue(i, j, matrix(i,j) - (matrix(i,k) * matrix(k,j)));\n\t\t\t}\n\t\t}\n\t}\n}\n\nVector DenseMatrix::LUSolve(const DenseMatrix &LU, const Vector &b) {\n\tVector x = b;\n\n\tfor (index i = 0; i < LU.numberOfRows()-1; ++i) { // forward substitution\n\t\tfor (index j = i+1; j < LU.numberOfRows(); ++j) {\n\t\t\tx[j] -= x[i] * LU(j,i);\n\t\t}\n\t}\n\n\tfor (index i = LU.numberOfRows(); i-- > 0;) { // backward substitution\n\t\tx[i] /= LU(i,i);\n\t\tfor (index j = 0; j < i; ++j) {\n\t\t\tx[j] -= x[i] * LU(j,i);\n\t\t}\n\t}\n\n\treturn x;\n}\n\n\n\n} /* namespace NetworKit */\n" }, { "alpha_fraction": 0.6104688048362732, "alphanum_fraction": 0.6165650486946106, "avg_line_length": 20.142221450805664, "blob_id": "28e7a34ed1871889587eb18ec4e8e5c2aeb35583", "content_id": "c35cc0f92e304b53a52a87d4dcaedcb88dfafd6f", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 4757, "license_type": "permissive", "max_line_length": 103, "num_lines": 225, "path": "/networkit/cpp/viz/Point.h", "repo_name": "networkitproject/networkit-mirror", "src_encoding": "UTF-8", "text": "/*\n * Point.h\n *\n * Created on: Apr 11, 2013\n * Author: Henning\n */\n\n#ifndef POINT_H_\n#define POINT_H_\n\n#include <vector>\n#include <cinttypes>\n#include <cassert>\n#include <cmath>\n#include <cstdint>\n#include <iostream>\n#include <sstream>\n#include \"../Globals.h\"\n\nnamespace NetworKit {\n\n//template<class T> class Point;\n//\n//template<class T>\n//std::ostream& operator <<(std::ostream& out, Point<T>& point);\n\n\n\n/**\n * @ingroup viz\n *\n * 
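Example (an editor's sketch, not from the original header): for Point<double> a(1.0, 2.0)\n * and b(4.0, 6.0), a.distance(b) returns 5.0 and a + b is the point (5, 8).\n *\n * 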
DEPRECATED: To take advantage of automatic mapping between C++ and Python data structures, use\n * standard library containers (std::pair, std::tuple..) instead.\n *\n * Points in any dimension of templated type.\n */\ntemplate<class T>\nclass Point {\nprotected:\n\tstd::vector<T> data;\n\npublic:\n\tPoint() { data = {0.0, 0.0}; }\n\tPoint(T x, T y) { data = {x, y}; }\n\tPoint(std::vector<T>& values): data(values) {}\n\tvirtual ~Point() {}\n\n\tcount getDimensions() const { return data.size(); }\n\n\tT distance(const Point<T>& p) const;\n\tT squaredDistance(const Point<T>& p) const;\n\n\tPoint& operator+=(const Point<T>& p);\n\tPoint& operator-=(const Point<T>& p);\n\tPoint& scale(const T factor);\n\n\tPoint operator-(const Point<T>& other);\n\tPoint operator+(const Point<T>& other);\n\n\tT length() const;\n\tT squaredLength() const;\n\n\tT& operator[](const index i);\n\tT at(const index i) const;\n\n\t/**\n\t * Default point to string conversion.\n\t */\n\tstd::string toString();\n\n\t/**\n\t * Point to comma separated string.\n\t */\n\tstd::string toCsvString();\n\n\t/**\n\t * Point to space separated string.\n\t */\n\tstd::string toSsvString();\n\n\tstd::string genericToString(const std::string& start, const std::string& sep, const std::string& end);\n\n//\tfriend std::ostream& operator<< <>(std::ostream &out, Point<T>& point);\n};\n\ntemplate<class T>\nT Point<T>::length() const {\n\tT length = (T) 0;\n\tfor (index i = 0; i < data.size(); ++i) {\n\t\tT diff = this->data[i];\n\t\tlength += diff * diff;\n\t}\n\treturn sqrt(length);\n}\n\ntemplate<class T>\nT Point<T>::squaredLength() const {\n\tT length = (T) 0;\n\tfor (index i = 0; i < data.size(); ++i) {\n\t\tT diff = this->data[i];\n\t\tlength += diff * diff;\n\t}\n\treturn length;\n}\n\ntemplate<class T>\nT Point<T>::squaredDistance(const Point<T>& p) const {\n\tassert(this->data.size() == p.data.size());\n\tT dist = (T) 0;\n\tfor (index i = 0; i < data.size(); ++i) {\n\t\tT diff = this->data[i] - p.data[i];\n\t\tdist += diff * diff;\n\t}\n\treturn dist;\n}\n\ntemplate<class T>\nT Point<T>::distance(const Point<T>& p) const {\n\treturn sqrt(squaredDistance(p));\n}\n\ntemplate<class T>\nPoint<T>& Point<T>::operator+=(const Point<T>& p) {\n\tassert(this->data.size() == p.data.size());\n\tfor (index i = 0; i < data.size(); ++i) {\n\t\tthis->data[i] += p.data[i];\n\t}\n\treturn *this;\n}\n\ntemplate<class T>\nPoint<T>& Point<T>::operator-=(const Point<T>& p) {\n\tassert(this->data.size() == p.data.size());\n\tfor (index i = 0; i < data.size(); ++i) {\n\t\tthis->data[i] -= p.data[i];\n\t}\n\treturn *this;\n}\n\ntemplate<class T>\nPoint<T> Point<T>::operator-(const Point<T>& other) {\n\tPoint<T> result(*this);\n\tassert(result.data.size() == other.data.size());\n\tfor (index i = 0; i < result.data.size(); ++i) {\n\t\tresult.data[i] -= other.data[i];\n\t}\n\treturn result;\n}\n\ntemplate<class T>\nPoint<T> Point<T>::operator+(const Point<T>& other) {\n\tPoint<T> result(*this);\n\tassert(result.data.size() == other.data.size());\n\tfor (index i = 0; i < result.data.size(); ++i) {\n\t\tresult.data[i] += other.data[i];\n\t}\n\treturn result;\n}\n\n\ntemplate<class T>\nPoint<T>& Point<T>::scale(const T factor) {\n\tfor (index i = 0; i < data.size(); ++i) {\n\t\tthis->data[i] *= factor;\n\t}\n\treturn *this;\n}\n\ntemplate<class T>\ninline T& Point<T>::operator [](index i) {\n\tassert(i >= 0 && i < data.size());\n\treturn data[i];\n}\n\ntemplate<class T>\ninline T Point<T>::at(index i) const {\n\tassert(i >= 0 && i < data.size());\n\treturn 
data.at(i);\n}\n\ntemplate<class T>\nstd::ostream& operator <<(std::ostream& out, Point<T>& point)\n{\n\tassert(point.data.size() > 0);\n\tout << \"(\" << point[0];\n\tfor (index i = 1; i < point.data.size(); ++i) {\n\t\tout << \", \" << point.data[i];\n\t}\n\tout << \")\";\n\treturn out;\n}\n\ntemplate<class T>\nstd::string Point<T>::toString() {\n\treturn genericToString(\"\", \", \", \"\");\n}\n\ntemplate<class T>\ninline std::string NetworKit::Point<T>::toCsvString() {\n\treturn genericToString(\"(\", \", \", \")\");\n}\n\ntemplate<class T>\ninline std::string NetworKit::Point<T>::toSsvString() {\n\treturn genericToString(\"\", \" \", \"\");\n}\n\ntemplate<class T>\ninline std::string NetworKit::Point<T>::genericToString(\n\t\tconst std::string& start, const std::string& sep,\n\t\tconst std::string& end)\n{\n\tassert(this->data.size() > 0);\n\tstd::stringstream out;\n\tout << start << (*this)[0];\n\tfor (index i = 1; i < this->data.size(); ++i) {\n\t\tout << sep << this->data[i];\n\t}\n\tout << end;\n\treturn out.str();\n}\n\n} /* namespace NetworKit */\n\n#endif /* POINT_H_ */\n" }, { "alpha_fraction": 0.6497604846954346, "alphanum_fraction": 0.656579315662384, "avg_line_length": 29.914634704589844, "blob_id": "d6ac2e2f071e67c650fe120cd0582a8660a84e81", "content_id": "1ff263a186a6979ed6f1c9a889f26357a3e66fb4", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 17745, "license_type": "permissive", "max_line_length": 181, "num_lines": 574, "path": "/networkit/cpp/generators/quadtree/QuadNodeCartesianEuclid.h", "repo_name": "networkitproject/networkit-mirror", "src_encoding": "UTF-8", "text": "/*\n * QuadNodeCartesianEuclid.h\n *\n * Created on: 21.05.2014\n * Author: Moritz v. Looz ([email protected])\n *\n * Note: This is similar enough to QuadNode.h that one could merge these two classes.\n */\n\n#ifndef QUADNODECARTESIANEUCLID_H_\n#define QUADNODECARTESIANEUCLID_H_\n\n#include <vector>\n#include <algorithm>\n#include <functional>\n#include <assert.h>\n#include \"../../auxiliary/Log.h\"\n#include \"../../geometric/HyperbolicSpace.h\"\n\nusing std::vector;\nusing std::min;\nusing std::max;\nusing std::cos;\n\nnamespace NetworKit {\n\ntemplate <class T>\nclass QuadNodeCartesianEuclid {\n\tfriend class QuadTreeGTest;\nprivate:\n\tPoint<double> minPoint;\n\tPoint<double> maxPoint;\n\tcount dimension;\n\tunsigned capacity;\n\tstatic const unsigned coarsenLimit = 4;\n\tstatic const long unsigned sanityNodeLimit = 10E15; //just assuming, for debug purposes, that this algorithm never runs on machines with more than 4 Petabyte RAM\n\tcount subTreeSize;\n\tstd::vector<T> content;\n\tstd::vector<Point<double> > positions;\n\tbool isLeaf;\n\tbool splitTheoretical;\n\tindex ID;\n\tdouble lowerBoundR;\n\npublic:\n\tstd::vector<QuadNodeCartesianEuclid> children;\n\n\t/**\n\t * Construct a QuadNode for Cartesian coordinates.\n\t *\n\t * @param lower Minimal coordinates of the region, one entry per dimension\n\t * @param upper Maximal coordinates of the region, one entry per dimension\n\t * @param capacity Number of points a leaf cell can store before splitting\n\t * @param splitTheoretical Whether to split in a theoretically optimal way or in a way to decrease measured running times\n\t *\n\t * 
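Construction sketch (an editor's addition, not in the original header):\n\t * QuadNodeCartesianEuclid<index> cell(Point<double>(0.0, 0.0), Point<double>(1.0, 1.0), 1000);\n\t * creates a leaf covering the unit square that splits once roughly 1000 points are added.\n\t 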
*/\n\tQuadNodeCartesianEuclid(Point<double> lower = Point<double>({0.0, 0.0}), Point<double> upper = Point<double>({1.0, 1.0}), unsigned capacity = 1000, bool splitTheoretical = false) {\n\t\tthis->minPoint = lower;\n\t\tthis->maxPoint = upper;\n\t\tthis->dimension = minPoint.getDimensions();\n\t\tassert(maxPoint.getDimensions() == dimension);\n\t\tthis->capacity = capacity;\n\t\tthis->splitTheoretical = splitTheoretical;\n\t\tthis->ID = 0;\n\t\tisLeaf = true;\n\t\tsubTreeSize = 0;\n\t}\n\n\tvoid split() {\n\t\tassert(isLeaf);\n\t\tassert(children.size() == 0);\n\t\tvector<double> middle(dimension);\n\t\tif (splitTheoretical) {\n\t\t\t//Euclidean space is distributed equally\n\t\t\tfor (index d = 0; d < dimension; d++) {\n\t\t\t\tmiddle[d] = (minPoint[d] + maxPoint[d]) / 2;\n\t\t\t}\n\t\t} else {\n\t\t\t//median of points\n\t\t\tconst count numPoints = positions.size();\n\t\t\tassert(numPoints > 0);//otherwise, why split?\n\t\t\tvector<vector<double> > sorted(dimension);\n\t\t\tfor (index d = 0; d < dimension; d++) {\n\t\t\t\tsorted[d].resize(numPoints);\n\t\t\t\tfor (index i = 0; i < numPoints; i++) {\n\t\t\t\t\tsorted[d][i] = positions[i][d];\n\t\t\t\t}\n\t\t\t\tstd::sort(sorted[d].begin(), sorted[d].end());\n\t\t\t\tmiddle[d] = sorted[d][numPoints/2];//the assert above guarantees at least one point\n\t\t\t\tassert(middle[d] <= maxPoint[d]);\n\t\t\t\tassert(middle[d] >= minPoint[d]);\n\t\t\t}\n\t\t}\n\t\tcount childCount = pow(2,dimension);\n\t\tfor (index i = 0; i < childCount; i++) {\n\t\t\tvector<double> lowerValues(dimension);\n\t\t\tvector<double> upperValues(dimension);\n\t\t\tindex bitCopy = i;\n\t\t\tfor (index d = 0; d < dimension; d++) {\n\t\t\t\tif (bitCopy & 1) {\n\t\t\t\t\tlowerValues[d] = middle[d];\n\t\t\t\t\tupperValues[d] = maxPoint[d];\n\t\t\t\t} else {\n\t\t\t\t\tlowerValues[d] = minPoint[d];\n\t\t\t\t\tupperValues[d] = middle[d];\n\t\t\t\t}\n\t\t\t\tbitCopy = bitCopy >> 1;\n\t\t\t}\n\t\t\tQuadNodeCartesianEuclid child(Point<double>(lowerValues), Point<double>(upperValues), capacity, splitTheoretical);\n\t\t\tassert(child.isLeaf);\n\t\t\tchildren.push_back(child);\n\t\t}\n\t\tisLeaf = false;\n\t}\n\n\t/**\n\t * Add a point at position pos with content input. 
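For example (an editor's illustration, continuing the sketch above):\n\t * cell.addContent(42, Point<double>(0.25, 0.75)); stores element 42 at that position.\n\t * 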
The node may split if its capacity is exceeded.\n\t *\n\t * @param input arbitrary content, in our case an index\n\t * @param pos Cartesian coordinates of the point; must lie inside the region of this node\n\t */\n\tvoid addContent(T input, Point<double> pos) {\n\t\tassert(input < sanityNodeLimit);\n\t\tassert(content.size() == positions.size());\n\t\tassert(this->responsible(pos));\n\t\tif (isLeaf) {\n\t\t\tif (content.size() + 1 < capacity) {\n\t\t\t\tcontent.push_back(input);\n\t\t\t\tpositions.push_back(pos);\n\t\t\t} else {\n\t\t\t\tsplit();\n\n\t\t\t\tfor (index i = 0; i < content.size(); i++) {\n\t\t\t\t\tthis->addContent(content[i], positions[i]);\n\t\t\t\t}\n\t\t\t\tassert(subTreeSize == content.size());//we have added everything twice\n\t\t\t\tsubTreeSize = content.size();\n\t\t\t\tcontent.clear();\n\t\t\t\tpositions.clear();\n\t\t\t\tthis->addContent(input, pos);\n\t\t\t}\n\t\t}\n\t\telse {\n\t\t\tassert(children.size() > 0);\n\t\t\tbool foundResponsibleChild = false;\n\t\t\tfor (index i = 0; i < children.size(); i++) {\n\t\t\t\tif (children[i].responsible(pos)) {\n\t\t\t\t\tfoundResponsibleChild = true;\n\t\t\t\t\tchildren[i].addContent(input, pos);\n\t\t\t\t\tbreak;\n\t\t\t\t}\n\t\t\t}\n\t\t\tassert(foundResponsibleChild);\n\t\t\tsubTreeSize++;\n\t\t}\n\t}\n\n\t/**\n\t * Remove content at coordinate pos. May cause coarsening of the quadtree.\n\t *\n\t * @param input Content to be removed\n\t * @param pos Coordinate of content\n\t *\n\t * @return True if content was found and removed, false otherwise\n\t */\n\tbool removeContent(T input, Point<double> pos) {\n\t\tif (!responsible(pos)) return false;\n\t\tif (isLeaf) {\n\t\t\tindex i = 0;\n\t\t\tfor (; i < content.size(); i++) {\n\t\t\t\tif (content[i] == input) break;\n\t\t\t}\n\t\t\tif (i < content.size()) {\n\t\t\t\tassert(positions[i].distance(pos) == 0);\n\t\t\t\t//remove element\n\t\t\t\tcontent.erase(content.begin()+i);\n\t\t\t\tpositions.erase(positions.begin()+i);\n\t\t\t\treturn true;\n\t\t\t} else {\n\t\t\t\treturn false;\n\t\t\t}\n\t\t}\n\t\telse {\n\t\t\tbool removed = false;\n\t\t\tbool allLeaves = true;\n\t\t\tassert(children.size() > 0);\n\t\t\tfor (index i = 0; i < children.size(); i++) {\n\t\t\t\tif (!children[i].isLeaf) allLeaves = false;\n\t\t\t\tif (children[i].removeContent(input, pos)) {\n\t\t\t\t\tassert(!removed);\n\t\t\t\t\tremoved = true;\n\t\t\t\t}\n\t\t\t}\n\t\t\tif (removed) subTreeSize--;\n\t\t\t//coarsen?\n\t\t\tif (removed && allLeaves && size() < coarsenLimit) {\n\t\t\t\t//coarsen!!\n\t\t\t\t//why not assert empty containers and then insert directly?\n\t\t\t\tvector<T> allContent;\n\t\t\t\tvector<Point<double> > allPositions;\n\t\t\t\tfor (index i = 0; i < children.size(); i++) {\n\t\t\t\t\tallContent.insert(allContent.end(), children[i].content.begin(), children[i].content.end());\n\t\t\t\t\tallPositions.insert(allPositions.end(), children[i].positions.begin(), children[i].positions.end());\n\t\t\t\t}\n\t\t\t\tassert(allContent.size() == allPositions.size());\n\t\t\t\tchildren.clear();\n\t\t\t\tcontent.swap(allContent);\n\t\t\t\tpositions.swap(allPositions);\n\t\t\t\tisLeaf = true;\n\t\t\t}\n\n\t\t\treturn removed;\n\t\t}\n\t}\n\n\n\t/**\n\t * Check whether the region managed by this node lies outside of a Euclidean circle.\n\t *\n\t * @param query Center of the Euclidean query circle, given in Cartesian coordinates\n\t * @param radius Radius of the Euclidean query circle\n\t *\n\t * @return True if the region managed by this node lies completely outside of the circle\n\t 
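*\n\t * Numeric sketch (an editor's addition): for a node covering [0,1]^2, a query circle with center (3, 0)\n\t * and radius 1 is out of reach, since the closest point of the region, (1, 0), lies at distance 2.\n\t 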
*/\n\tbool outOfReach(Point<double> query, double radius) const {\n\t\treturn EuclideanDistances(query).first > radius;\n\t}\n\n\t/**\n\t * @param query Position of the query point\n\t */\n\tstd::pair<double, double> EuclideanDistances(Point<double> query) const {\n\t\t/**\n\t\t * If the query point is not within the quadnode, the distance minimum is on the border.\n\t\t * Need to check whether extremum is between corners.\n\t\t */\n\t\tdouble maxDistance = 0;\n\t\tdouble minDistance = std::numeric_limits<double>::max();\n\t\t//Point<double> minCopy(minPoint);\n\t\t//Point<double> maxCopy(minPoint);\n\n\t\tif (responsible(query)) minDistance = 0;\n\n\t\tauto updateMinMax = [&minDistance, &maxDistance, query](Point<double> pos){\n\t\t\tdouble extremalValue = pos.distance(query);\n\t\t\tmaxDistance = std::max(extremalValue, maxDistance);\n\t\t\tminDistance = std::min(minDistance, extremalValue);\n\t\t};\n\n\t\tvector<double> closestValues(dimension);\n\t\tvector<double> farthestValues(dimension);\n\n\t\tfor (index d = 0; d < dimension; d++) {\n\t\t\tif (std::abs(query[d] - minPoint.at(d)) < std::abs(query[d] - maxPoint.at(d))) {\n\t\t\t\tclosestValues[d] = minPoint.at(d);\n\t\t\t\tfarthestValues[d] = maxPoint.at(d);\n\t\t\t} else {\n\t\t\t\tfarthestValues[d] = minPoint.at(d);\n\t\t\t\tclosestValues[d] = maxPoint.at(d);\n\t\t\t}\n\t\t\tif (query[d] >= minPoint.at(d) && query[d] <= maxPoint.at(d)) {\n\t\t\t\tclosestValues[d] = query[d];\n\t\t\t}\n\t\t}\n\t\tupdateMinMax(Point<double>(closestValues));\n\t\tupdateMinMax(Point<double>(farthestValues));\n\n\t\tassert(minDistance < query.length() + maxPoint.length());\n\t\tassert(minDistance < maxDistance);\n\t\treturn std::pair<double, double>(minDistance, maxDistance);\n\t}\n\n\n\t/**\n\t * Does the point at position pos fall inside the region managed by this QuadNode?\n\t *\n\t * @param pos Cartesian coordinates of the input point\n\t *\n\t * @return True if input point lies within the region of this QuadNode\n\t */\n\tbool responsible(Point<double> pos) const {\n\t\tfor (index d = 0; d < dimension; d++) {\n\t\t\tif (pos[d] < minPoint.at(d) || pos[d] >= maxPoint.at(d)) return false;\n\t\t}\n\t\treturn true;\n\t}\n\n\t/**\n\t * Get all elements in this QuadNode or a descendant of it\n\t *\n\t * @return vector of content type T\n\t */\n\tstd::vector<T> getElements() const {\n\t\tif (isLeaf) {\n\t\t\treturn content;\n\t\t} else {\n\t\t\tassert(content.size() == 0);\n\t\t\tassert(positions.size() == 0);\n\t\t\tvector<T> result;\n\t\t\tfor (index i = 0; i < children.size(); i++) {\n\t\t\t\tstd::vector<T> subresult = children[i].getElements();\n\t\t\t\tresult.insert(result.end(), subresult.begin(), subresult.end());\n\t\t\t}\n\t\t\treturn result;\n\t\t}\n\t}\n\n\tvoid getCoordinates(vector<Point<double> > &pointContainer) const {\n\t\tif (isLeaf) {\n\t\t\tpointContainer.insert(pointContainer.end(), positions.begin(), positions.end());\n\t\t}\n\t\telse {\n\t\t\tassert(content.size() == 0);\n\t\t\tassert(positions.size() == 0);\n\t\t\tfor (index i = 0; i < children.size(); i++) {\n\t\t\t\tchildren[i].getCoordinates(pointContainer);\n\t\t\t}\n\t\t}\n\t}\n\n\t/**\n\t * Main query method: get points lying in a Euclidean circle around the center point.\n\t *\n\t * Elements are pushed onto a vector that is passed as a required argument, to reduce copying\n\t * (though copy elision may make this unnecessary).\n\t *\n\t * 
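Usage sketch (an editor's addition): to collect all elements within distance 0.1 of the center\n\t * of the unit square:\n\t *   std::vector<index> hits;\n\t *   cell.getElementsInEuclideanCircle(Point<double>(0.5, 0.5), 0.1, hits);\n\t *\n\t * 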
Safe to call in parallel.\n\t *\n\t * @param center Center of the query circle\n\t * @param radius Radius of the query circle\n\t * @param result Reference to the vector where the results will be stored\n\t */\n\tvoid getElementsInEuclideanCircle(Point<double> center, double radius, vector<T> &result) const {\n\t\tif (outOfReach(center, radius)) {\n\t\t\treturn;\n\t\t}\n\n\t\tif (isLeaf) {\n\t\t\tconst double rsq = radius*radius;\n\t\t\tconst count cSize = content.size();\n\n\t\t\tfor (int i=0; i < cSize; i++) {\n\t\t\t\tif (positions[i].squaredDistance(center) < rsq) {\n\t\t\t\t\tresult.push_back(content[i]);\n\t\t\t\t\tif (content[i] >= sanityNodeLimit) DEBUG(\"Quadnode content \", content[i], \" found, suspiciously high!\");\n\t\t\t\t\tassert(content[i] < sanityNodeLimit);\n\t\t\t\t}\n\t\t\t}\n\t\t} else {\n\t\t\tfor (index i = 0; i < children.size(); i++) {\n\t\t\t\tchildren[i].getElementsInEuclideanCircle(center, radius, result);\n\t\t\t}\n\t\t}\n\t}\n\n\tcount getElementsProbabilistically(Point<double> euQuery, std::function<double(double)> prob, vector<T> &result) const {\n\t\tTRACE(\"Getting Euclidean distances\");\n\t\tauto distancePair = EuclideanDistances(euQuery);\n\t\tdouble probUB = prob(distancePair.first);\n\t\tdouble probLB = prob(distancePair.second);\n\t\tassert(probLB <= probUB);\n\t\tif (probUB > 0.5) probUB = 1;\n\t\tif (probUB == 0) return 0;\n\t\t//TODO: return whole if probLB == 1\n\t\tdouble probdenom = std::log(1-probUB);\n\t\tif (probdenom == 0) return 0;//there is a very small probability, but we cannot process it.\n\t\tTRACE(\"probUB: \", probUB, \", probdenom: \", probdenom);\n\n\t\tcount expectedNeighbours = probUB*size();\n\t\tcount candidatesTested = 0;\n\t\tcount incomingNeighbours = result.size();\n\t\tcount ownsize = size();\n\n\n\t\tif (isLeaf) {\n\t\t\tconst count lsize = content.size();\n\t\t\tTRACE(\"Leaf of size \", lsize);\n\t\t\tfor (int i = 0; i < lsize; i++) {\n\t\t\t\t//jump!\n\t\t\t\tif (probUB < 1) {\n\t\t\t\t\tdouble random = Aux::Random::real();\n\t\t\t\t\tdouble delta = std::log(random) / probdenom;\n\t\t\t\t\tassert(delta >= 0);\n\t\t\t\t\ti += delta;\n\t\t\t\t\tif (i >= lsize) break;\n\t\t\t\t\tTRACE(\"Jumped with delta \", delta, \" arrived at \", i);\n\t\t\t\t}\n\t\t\t\tassert(i >= 0);\n\n\t\t\t\t//see where we've arrived\n\t\t\t\tcandidatesTested++;\n\t\t\t\tdouble distance = positions[i].distance(euQuery);\n\t\t\t\tassert(distance >= distancePair.first);//TODO: These should not fail!\n\t\t\t\tassert(distance <= distancePair.second);\n\t\t\t\tdouble q = prob(distance);\n\t\t\t\tq = q / probUB; //since the candidate was selected by the jumping process, we have to adjust the probabilities\n\t\t\t\tassert(q <= 1);\n\n\t\t\t\t//accept?\n\t\t\t\tdouble acc = Aux::Random::real();\n\t\t\t\tif (acc < q) {\n\t\t\t\t\tTRACE(\"Accepted node \", i, \" with probability \", q, \".\");\n\t\t\t\t\tresult.push_back(content[i]);\n\t\t\t\t}\n\t\t\t}\n\t\t} else {\n\t\t\tif (expectedNeighbours < 4 || probUB < 1.0/1000) {//select candidates directly instead of calling recursively\n\t\t\t\tTRACE(\"probUB = \", 
probUB, \", switching to direct candidate selection.\");\n\t\t\t\tassert(probUB < 1);\n\t\t\t\tconst count stsize = size();\n\t\t\t\tfor (index i = 0; i < stsize; i++) {\n\t\t\t\t\tdouble delta = std::log(Aux::Random::real()) / probdenom;\n\t\t\t\t\tassert(delta >= 0);\n\t\t\t\t\ti += delta;\n\t\t\t\t\tTRACE(\"Jumped with delta \", delta, \" arrived at \", i, \". Calling maybeGetKthElement.\");\n\t\t\t\t\tif (i < size()) maybeGetKthElement(probUB, euQuery, prob, i, result);//this could be optimized. As of now, the offset is subtracted separately for each point\n\t\t\t\t\telse break;\n\t\t\t\t\tcandidatesTested++;\n\t\t\t\t}\n\t\t\t} else {//carry on as normal\n\t\t\t\tfor (index i = 0; i < children.size(); i++) {\n\t\t\t\t\tTRACE(\"Recursively calling child \", i);\n\t\t\t\t\tcandidatesTested += children[i].getElementsProbabilistically(euQuery, prob, result);\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tcount finalNeighbours = result.size();\n\t\tif (probLB == 1) assert(finalNeighbours == incomingNeighbours + ownsize);\n\t\treturn candidatesTested;\n\t}\n\n\n\tvoid maybeGetKthElement(double upperBound, Point<double> euQuery, std::function<double(double)> prob, index k, vector<T> &circleDenizens) const {\n\t\tTRACE(\"Maybe get element \", k, \" with upper Bound \", upperBound);\n\t\tassert(k < size());\n\t\tif (isLeaf) {\n\t\t\tdouble acceptance = prob(euQuery.distance(positions[k]))/upperBound;\n\t\t\tTRACE(\"Is leaf, accept with \", acceptance);\n\t\t\tif (Aux::Random::real() < acceptance) circleDenizens.push_back(content[k]);\n\t\t} else {\n\t\t\tTRACE(\"Call recursively.\");\n\t\t\tindex offset = 0;\n\t\t\tfor (index i = 0; i < children.size(); i++) {\n\t\t\t\tcount childsize = children[i].size();\n\t\t\t\tif (k - offset < childsize) {\n\t\t\t\t\tchildren[i].maybeGetKthElement(upperBound, euQuery, prob, k - offset, circleDenizens);\n\t\t\t\t\tbreak;\n\t\t\t\t}\n\t\t\t\toffset += childsize;\n\t\t\t}\n\t\t}\n\t}\n\n\t/**\n\t * Shrink all vectors in this subtree to fit the content.\n\t * Call after quadtree construction is complete, causes better memory usage and cache efficiency\n\t */\n\tvoid trim() {\n\t\tcontent.shrink_to_fit();\n\t\tpositions.shrink_to_fit();\n\t\tif (!isLeaf) {\n\t\t\tfor (index i = 0; i < children.size(); i++) {\n\t\t\t\tchildren[i].trim();\n\t\t\t}\n\t\t}\n\t}\n\n\t/**\n\t * Number of points lying in the region managed by this QuadNode\n\t */\n\tcount size() const {\n\t\treturn isLeaf ? 
content.size() : subTreeSize;\n\t}\n\n\tvoid recount() {\n\t\tsubTreeSize = 0;\n\t\tfor (index i = 0; i < children.size(); i++) {\n\t\t\tchildren[i].recount();\n\t\t\tsubTreeSize += children[i].size();\n\t\t}\n\t}\n\n\t/**\n\t * Height of subtree hanging from this QuadNode\n\t */\n\tcount height() const {\n\t\tcount result = 1;//if leaf node, the children loop will not execute\n\t\tfor (auto child : children) result = std::max(result, child.height()+1);\n\t\treturn result;\n\t}\n\n\t/**\n\t * Leaf cells in the subtree hanging from this QuadNode\n\t */\n\tcount countLeaves() const {\n\t\tif (isLeaf) return 1;\n\t\tcount result = 0;\n\t\tfor (index i = 0; i < children.size(); i++) {\n\t\t\tresult += children[i].countLeaves();\n\t\t}\n\t\treturn result;\n\t}\n\n\tindex getID() const {\n\t\treturn ID;\n\t}\n\n\tindex indexSubtree(index nextID) {\n\t\tindex result = nextID;\n\t\tassert(children.size() == pow(2,dimension) || children.size() == 0);\n\t\tfor (int i = 0; i < children.size(); i++) {\n\t\t\tresult = children[i].indexSubtree(result);\n\t\t}\n\t\tthis->ID = result;\n\t\treturn result+1;\n\t}\n\n\tindex getCellID(Point<double> pos) const {\n\t\tif (!responsible(pos)) return none;\n\t\tif (isLeaf) return getID();\n\t\telse {\n\t\t\tfor (int i = 0; i < children.size(); i++) {\n\t\t\t\tindex childresult = children[i].getCellID(pos);\n\t\t\t\tif (childresult != none) return childresult;\n\t\t\t}\n\t\t\tassert(false); //unreachable: if this node is responsible, some child must be\n\t\t\treturn none;\n\t\t}\n\t}\n\n\tindex getMaxIDInSubtree() const {\n\t\tif (isLeaf) return getID();\n\t\telse {\n\t\t\tindex result = 0;\n\t\t\tfor (int i = 0; i < children.size(); i++) {\n\t\t\t\tresult = std::max(children[i].getMaxIDInSubtree(), result);\n\t\t\t}\n\t\t\treturn std::max(result, getID());\n\t\t}\n\t}\n\n\tcount reindex(count offset) {\n\t\tif (isLeaf)\n\t\t{\n\t\t\t#pragma omp task\n\t\t\t{\n\t\t\t\tindex p = offset;\n\t\t\t\tstd::generate(content.begin(), content.end(), [&p](){return p++;});\n\t\t\t}\n\t\t\toffset += size();\n\t\t} else {\n\t\t\tfor (int i = 0; i < children.size(); i++) {\n\t\t\t\toffset = children[i].reindex(offset);\n\t\t\t}\n\t\t}\n\t\treturn offset;\n\t}\n};\n}\n\n#endif /* QUADNODECARTESIANEUCLID_H_ */\n" }, { "alpha_fraction": 0.6566709876060486, "alphanum_fraction": 0.6640591025352478, "avg_line_length": 23.96875, "blob_id": "26dbf9fb104f372d1f09e94af766c2bbd7a2bea6", "content_id": "45d9882c74932d83e2b4ffa58d790716e0aea7f4", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 2301, "license_type": "permissive", "max_line_length": 75, "num_lines": 96, "path": "/networkit/cpp/spanning/RandomSpanningTree.cpp", "repo_name": "networkitproject/networkit-mirror", "src_encoding": "UTF-8", "text": "/*\n * RandomSpanningTree.cpp\n *\n * Created on: 20.06.2015\n * Author: Henning\n */\n\n#include \"RandomSpanningTree.h\"\n#include \"../structures/UnionFind.h\"\n#include \"../graph/Sampling.h\"\n#include <random>\n\nnamespace NetworKit {\n\nRandomSpanningTree::RandomSpanningTree(const Graph& G): g(G) {\n\tedges.clear();\n\tedges.resize(G.numberOfEdges());\n\tcount i = 0;\n\tG.forEdges([&](node u, node v){\n\t\tedges[i] = std::make_pair(u,v);\n\t\ti ++;\n\t});\n}\n\nRandomSpanningTree::~RandomSpanningTree() {\n}\n\n\nvoid RandomSpanningTree::run() {\n\n\t// TODO: handle disconnected graphs\n\n\tcount n = g.numberOfNodes();\n\tGraph randTree(n);\n\tcount numVisited = 0;\n\tstd::vector<bool> visited(n, false);\n\n\t// find and process root\n\tnode curr = 
Sampling::randomNode(g);\n\tvisited[curr] = true;\n\tnumVisited++;\n\n\twhile (numVisited < n) {\n\t\t// get random neighbor\n\t\tnode neigh = g.randomNeighbor(curr);\n\n\t\t// if not seen before, insert tree edge\n\t\tif (! visited[neigh]) {\n\t\t\trandTree.addEdge(curr, neigh);\n\t\t\tvisited[neigh] = true;\n\t\t\t++numVisited;\n\t\t}\n\n\t\t// move to neighbor\n\t\tcurr = neigh;\n\t}\n\n\ttree = randTree;\n}\n\nvoid RandomSpanningTree::run2() {\n\n\t// TODO: handle disconnected graphs\n\tcount n = g.numberOfNodes();\n\t// std::default_random_engine generator;\n\t// std::uniform_int_distribution<int> distribution(0,g.numberOfEdges()-1);\n\tstd::random_shuffle (edges.begin(), edges.end());\n\tGraph randTree(n);\n\tUnionFind part(n);\n\t// INFO(\"Number of partitions: \", part.numberOfSubsets());\n\tcount iter = 0, edgesTree = 0;\n\twhile(edgesTree < n-1) {\n\t//\tINFO(\"Number of partitions: \", part.numberOfSubsets());\n\t\tstd::pair<node, node> rand_edge = edges[iter];\n\t//\tINFO(rand_edge.first, \" \", rand_edge.second);\n\t\tif (part.find(rand_edge.first) != part.find(rand_edge.second)) {\n\t\t\t// INFO(\"Partition of the first: \", rand_edge.first);\n\t\t\t// INFO(\"Partition of the second: \", rand_edge.second);\n\t\t\t// INFO(\"Merging two partitions. Before :\", part.numberOfSubsets());\n\t\t\trandTree.addEdge(rand_edge.first, rand_edge.second);\n\t\t\tpart.merge(part.find(rand_edge.first), part.find(rand_edge.second));\n\t\t\tedgesTree ++;\n\t\t\t// INFO(\"AFter: \", part.numberOfSubsets());\n\t\t}\n\t\titer ++;\n\t}\n\tINFO(\"Iter: \", iter);\n\tassert(randTree.numberOfEdges() == n-1);\n\ttree = randTree;\n}\n\nGraph RandomSpanningTree::getTree() {\n\treturn tree;\n}\n\n} /* namespace NetworKit */\n" }, { "alpha_fraction": 0.6471571922302246, "alphanum_fraction": 0.654403567314148, "avg_line_length": 24.628570556640625, "blob_id": "e45c28ec48665697ebc99446f5bb56c4f7990670", "content_id": "664135016a35a5df1e787960ef394b8cb4aea6b3", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 3588, "license_type": "permissive", "max_line_length": 138, "num_lines": 140, "path": "/networkit/cpp/centrality/Betweenness.cpp", "repo_name": "networkitproject/networkit-mirror", "src_encoding": "UTF-8", "text": "/*\n * Betweenness.cpp\n *\n * Created on: 29.07.2014\n * Author: cls, ebergamini\n */\n\n#include <stack>\n#include <queue>\n#include <memory>\n#include <omp.h>\n\n\n#include \"Betweenness.h\"\n#include \"../auxiliary/PrioQueue.h\"\n#include \"../auxiliary/Log.h\"\n#include \"../auxiliary/SignalHandling.h\"\n#include \"../graph/SSSP.h\"\n#include \"../graph/Dijkstra.h\"\n#include \"../graph/BFS.h\"\n\nnamespace NetworKit {\n\nBetweenness::Betweenness(const Graph& G, bool normalized, bool computeEdgeCentrality) : Centrality(G, normalized, computeEdgeCentrality) {\n\n}\n\nvoid Betweenness::run() {\n\tAux::SignalHandler handler;\n\tcount z = G.upperNodeIdBound();\n\tscoreData.clear();\n\tscoreData.resize(z);\n\tif (computeEdgeCentrality) {\n\t\tcount z2 = G.upperEdgeIdBound();\n\t\tedgeScoreData.clear();\n\t\tedgeScoreData.resize(z2);\n\t}\n\n\t// thread-local scores for efficient parallelism\n\tcount maxThreads = omp_get_max_threads();\n\tstd::vector<std::vector<double> > scorePerThread(maxThreads, std::vector<double>(G.upperNodeIdBound()));\n\tDEBUG(\"score per thread: \", scorePerThread.size());\n\tDEBUG(\"G.upperEdgeIdBound(): \", G.upperEdgeIdBound());\n\tstd::vector<std::vector<double> > edgeScorePerThread;\n\tif 
(computeEdgeCentrality) {\n\t\tedgeScorePerThread.resize(maxThreads, std::vector<double>(G.upperEdgeIdBound()));\n\t}\n\tDEBUG(\"edge score per thread: \", edgeScorePerThread.size());\n\n\tauto computeDependencies = [&](node s) {\n\n\t\tstd::vector<double> dependency(z, 0.0);\n\n\t\t// run SSSP algorithm and keep track of everything\n\t\tstd::unique_ptr<SSSP> sssp;\n\t\tif (G.isWeighted()) {\n\t\t\tsssp.reset(new Dijkstra(G, s, true, true));\n\t\t} else {\n\t\t\tsssp.reset(new BFS(G, s, true, true));\n\t\t}\n\t\tif (!handler.isRunning()) return;\n\t\tsssp->run();\n\t\tif (!handler.isRunning()) return;\n\t\t// compute dependencies for nodes in order of decreasing distance from s\n\t\tstd::vector<node> stack = sssp->getStack();\n\t\twhile (!stack.empty()) {\n\t\t\tnode t = stack.back();\n\t\t\tstack.pop_back();\n\t\t\tfor (node p : sssp->getPredecessors(t)) {\n\t\t\t\t// workaround for integer overflow in large graphs\n\t\t\t\tbigfloat tmp = sssp->numberOfPaths(p) / sssp->numberOfPaths(t);\n\t\t\t\tdouble weight;\n\t\t\t\ttmp.ToDouble(weight);\n\t\t\t\tdouble c= weight * (1 + dependency[t]);\n\t\t\t\tdependency[p] += c;\n\t\t\t\tif (computeEdgeCentrality) {\n\t\t\t\t\tedgeScorePerThread[omp_get_thread_num()][G.edgeId(p,t)] += c;\n\t\t\t\t}\n\n\n\t\t\t}\n\t\t\tif (t != s) {\n\t\t\t\tscorePerThread[omp_get_thread_num()][t] += dependency[t];\n\t\t\t}\n\t\t}\n\t};\n\thandler.assureRunning();\n\tG.balancedParallelForNodes(computeDependencies);\n\thandler.assureRunning();\n\tDEBUG(\"adding thread-local scores\");\n\t// add up all thread-local values\n\tfor (const auto &local : scorePerThread) {\n\t\tG.parallelForNodes([&](node v){\n\t\t\tscoreData[v] += local[v];\n\t\t});\n\t}\n\tif (computeEdgeCentrality) {\n\t\tfor (const auto &local : edgeScorePerThread) {\n\t\t\tfor (count i = 0; i < local.size(); i++) {\n\t\t\t\tedgeScoreData[i] += local[i];\n\t\t\t}\n\t\t}\n\t}\n\tif (normalized) {\n\t\t// divide by the number of possible pairs\n\t\tcount n = G.numberOfNodes();\n\t\tcount pairs = (n-2) * (n-1);\n\t\tcount edges = n * (n-1);\n\t\tif (!G.isDirected()) {\n\t\t\tpairs = pairs / 2;\n\t\t\tedges = edges / 2;\n\t\t}\n\t\tG.forNodes([&](node u){\n\t\t\tscoreData[u] = scoreData[u] / pairs;\n\t\t});\n\t\tif (computeEdgeCentrality) {\n\t\t\tfor (count edge = 0; edge < edgeScoreData.size(); edge++) {\n\t\t\t\tedgeScoreData[edge] = edgeScoreData[edge] / edges;\n\t\t\t}\n\t\t}\n\t}\n\n\thasRun = true;\n}\n\ndouble Betweenness::maximum(){\n\tif (normalized) {\n\t\treturn 1;\n\t}\n\tdouble score;\n\tcount n = G.numberOfNodes();\n\tif (G.isDirected()) {\n\t\tscore = (n-1)*(n-2);\n\t} else {\n\t\tscore = (n-1)*(n-2)/2;\n\t}\n\treturn score;\n}\n\n} /* namespace NetworKit */\n" }, { "alpha_fraction": 0.694372296333313, "alphanum_fraction": 0.7021645307540894, "avg_line_length": 23.0625, "blob_id": "e8e963156d809364f1bcdf8bf6b2c284802cf23f", "content_id": "0722732c290290cb66f5a17081a53d4b421d2bfe", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1155, "license_type": "permissive", "max_line_length": 92, "num_lines": 48, "path": "/networkit/cpp/graph/test/GraphBuilderDirectSwapGTest.h", "repo_name": "networkitproject/networkit-mirror", "src_encoding": "UTF-8", "text": "/*\n * GraphBuilderDirectSwapGTest.h\n *\n * Created on: 14.08.2014\n * Author: Marvin Ritter ([email protected])\n */\n\n#ifndef NOGTEST\n\n#ifndef GRAPH_BUILDER_DIRECT_SWAP_GTEST_H_\n#define GRAPH_BUILDER_DIRECT_SWAP_GTEST_H_\n\n#include <tuple>\n#include <gtest/gtest.h>\n\n#include 
\"../Graph.h\"\n#include \"../GraphBuilder.h\"\n\nnamespace NetworKit {\n\nclass GraphBuilderDirectSwapGTest: public testing::TestWithParam< std::tuple<bool, bool> > {\npublic:\n\tvirtual void SetUp();\n\nprotected:\n\tGraphBuilder bHouse;\n\tstd::vector< std::pair<node, node> > houseEdgesOut;\n\tstd::vector< std::vector<edgeweight> > Ahouse;\n\tcount n_house;\n\tcount m_house;\n\n\tbool isGraph() const { return !isWeighted() && !isDirected(); }\n\tbool isWeightedGraph() const { return isWeighted() && !isDirected(); }\n\tbool isDirectedGraph() const { return !isWeighted() && isDirected(); }\n\tbool isWeightedDirectedGraph() const { return isWeighted() && isDirected(); }\n\n\tbool isWeighted() const;\n\tbool isDirected() const;\n\n\tGraphBuilder createGraphBuilder(count n = 0) const;\n\tGraph toGraph(GraphBuilder& b) const;\n};\n\n} /* namespace NetworKit */\n\n#endif /* GRAPH_BUILDER_DIRECT_SWAP_GTEST_H_ */\n\n#endif /* NOGTEST */\n" }, { "alpha_fraction": 0.6660616993904114, "alphanum_fraction": 0.713248610496521, "avg_line_length": 21.040000915527344, "blob_id": "b9be2d95f47d18ead5e71becb67ede03a3b1ffac", "content_id": "15007844a1a9f19c987247c73c0161ad89ec1c0a", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 551, "license_type": "permissive", "max_line_length": 58, "num_lines": 25, "path": "/networkit/cpp/centrality/test/ApproxBetweennessGTest.cpp", "repo_name": "networkitproject/networkit-mirror", "src_encoding": "UTF-8", "text": "/*\n * ApproxBetweennessGTest.cpp\n *\n * Created on: 30.06.2014\n * Author: moritzl\n */\n\n#include \"ApproxBetweennessGTest.h\"\n#include \"../ApproxBetweenness.h\"\n#include \"../Betweenness.h\"\n#include \"../../generators/ErdosRenyiGenerator.h\"\n#include \"../../generators/DorogovtsevMendesGenerator.h\"\n#include \"../../distance/Diameter.h\"\n\nnamespace NetworKit {\n\n\nTEST_F(ApproxBetweennessGTest, benchApproxDiameterErdos) {\n\tErdosRenyiGenerator gen(10000,0.001);\n\tGraph G1 = gen.generate();\n\tApproxBetweenness approx(G1, 0.05, 0.1, 20);\n\tapprox.run();\n}\n\n}\n" }, { "alpha_fraction": 0.6648044586181641, "alphanum_fraction": 0.6871508359909058, "avg_line_length": 13.319999694824219, "blob_id": "39ad2db80a34e47f2713d4ca87e52e2e9eca2cc1", "content_id": "4dab70ab42677ef0055b6f8e4b0176664cb4882e", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 358, "license_type": "permissive", "max_line_length": 50, "num_lines": 25, "path": "/networkit/cpp/sparsification/test/LocalSimilarityGTest.h", "repo_name": "networkitproject/networkit-mirror", "src_encoding": "UTF-8", "text": "/*\n * LocalSimilarityGTest.h\n *\n * Created on: 26.07.2014\n * Author: Gerd Lindner\n */\n\n#ifndef NOGTEST\n\n#ifndef LOCALSIMILARITYTEST_H_\n#define LOCALSIMILARITYTEST_H_\n\n#include <gtest/gtest.h>\n\nnamespace NetworKit {\n\nclass LocalSimilarityGTest: public testing::Test {\n\n};\n\n\n} /* namespace NetworKit */\n#endif /* LOCALSIMILARITYTEST_H_ */\n\n#endif /*NOGTEST */\n" }, { "alpha_fraction": 0.6462212204933167, "alphanum_fraction": 0.6922234296798706, "avg_line_length": 19.75, "blob_id": "69262c68196487db7f2010edd78866a1c9074ea5", "content_id": "b9f729b3cb7f819fc4489822948eab39787025cb", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 913, "license_type": "permissive", "max_line_length": 61, "num_lines": 44, "path": "/networkit/cpp/sparsification/test/LocalSimilarityGTest.cpp", 
"repo_name": "networkitproject/networkit-mirror", "src_encoding": "UTF-8", "text": "/*\n * LocalSimilarityGTest.cpp\n *\n * Created on: 26.07.2014\n * Author: Gerd Lindner\n */\n\n#ifndef NOGTEST\n\n#include \"LocalSimilarityGTest.h\"\n\n#include \"../LocalSimilarityScore.h\"\n#include \"../../edgescores/ChibaNishizekiTriangleEdgeScore.h\"\n\n\nnamespace NetworKit {\n\nTEST_F(LocalSimilarityGTest, testAttributeSimple) {\n\tGraph g(4);\n\n\tg.addEdge(0, 1);\n\tg.addEdge(0, 3);\n\tg.addEdge(0, 2);\n\tg.addEdge(1, 2);\n\tg.indexEdges();\n\n\tChibaNishizekiTriangleEdgeScore triangleEdgeScore(g);\n\ttriangleEdgeScore.run();\n\tstd::vector<count> triangles = triangleEdgeScore.scores();\n\n\tLocalSimilarityScore localSim(g, triangles);\n\tlocalSim.run();\n\tstd::vector<double> exp = localSim.scores();\n\n\tEXPECT_DOUBLE_EQ(1.0, exp[g.edgeId(0, 1)]);\n\tEXPECT_NEAR(0.36907025, exp[g.edgeId(0, 2)], 1e-7);\n\tEXPECT_DOUBLE_EQ(1.0, exp[g.edgeId(0, 3)]);\n\tEXPECT_DOUBLE_EQ(1.0, exp[g.edgeId(1, 2)]);\n}\n\n}\n/* namespace NetworKit */\n\n#endif /*NOGTEST */\n" }, { "alpha_fraction": 0.6748466491699219, "alphanum_fraction": 0.699386477470398, "avg_line_length": 15.300000190734863, "blob_id": "c9fd17858d72942f1de216bbce4635de3000955e", "content_id": "dd1b8522b17b3fddc4cd444cb7e8197feda7471f", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 326, "license_type": "permissive", "max_line_length": 49, "num_lines": 20, "path": "/networkit/cpp/centrality/test/DynBetweennessGTest.h", "repo_name": "networkitproject/networkit-mirror", "src_encoding": "UTF-8", "text": "/*\n * DynBetweennessGTest.h\n *\n * Created on: 05.08.2014\n * Author: ebergamini, cls\n */\n\n#ifndef DYNBETWEENNESSGTEST_H_\n#define DYNBETWEENNESSGTEST_H_\n\n#include <gtest/gtest.h>\n\nnamespace NetworKit {\n\nclass DynBetweennessGTest: public testing::Test {\n};\n\n} /* namespace NetworKit */\n\n#endif /* DYNBETWEENNESSGTEST_H_ */\n" }, { "alpha_fraction": 0.6865384578704834, "alphanum_fraction": 0.7019230723381042, "avg_line_length": 15.25, "blob_id": "80ade2b35435274a86aa5a4b336203e9277ce018", "content_id": "0557b109ca043eb281206c97f3d6f11f481195be", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 520, "license_type": "permissive", "max_line_length": 51, "num_lines": 32, "path": "/networkit/cpp/algebraic/test/AdjacencyMatrixGTest.h", "repo_name": "networkitproject/networkit-mirror", "src_encoding": "UTF-8", "text": "/*\n * AdjacencyMatrixGTest.h\n *\n * Created on: 02.04.2014\n * Author: Michael\n */\n\n#ifndef NOGTEST\n\n#ifndef ADJACENCYMATRIXGTEST_H_\n#define ADJACENCYMATRIXGTEST_H_\n\n#include \"gtest/gtest.h\"\n#include \"../AdjacencyMatrix.h\"\n#include \"../../graph/Graph.h\"\n#include \"../../io/METISGraphReader.h\"\n\n\nnamespace NetworKit {\n\nclass AdjacencyMatrixGTest : public testing::Test {\npublic:\n\tAdjacencyMatrixGTest();\n\tvirtual ~AdjacencyMatrixGTest();\n};\n\n\n} /* namespace NetworKit */\n\n#endif /* ADJACENCYMATRIXGTEST_H_ */\n\n#endif\n" }, { "alpha_fraction": 0.6930946111679077, "alphanum_fraction": 0.7135549783706665, "avg_line_length": 14.640000343322754, "blob_id": "f7aa1bcc80fe8bfd7616d7a2403c522bb8690de3", "content_id": "1e4764540ff930b7aac7c85c53e9ccd9d41e4581", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 391, "license_type": "permissive", "max_line_length": 68, "num_lines": 25, "path": 
"/networkit/cpp/edgescores/test/ChibaNishizekiQuadrangleEdgeScoreGTest.h", "repo_name": "networkitproject/networkit-mirror", "src_encoding": "UTF-8", "text": "/*\n * ChibaNishizekiQuadrangleEdgeScoreGTest.h\n *\n * Created on: 23.05.2014\n * Author: Gerd Lindner\n */\n\n#ifndef NOGTEST\n\n#ifndef CHIBANISHIZEKITEST_H_\n#define CHIBANISHIZEKITEST_H_\n\n#include <gtest/gtest.h>\n\nnamespace NetworKit {\n\nclass ChibaNishizekiQuadrangleEdgeScoreGTest: public testing::Test {\n\n};\n\n\n} /* namespace NetworKit */\n#endif /* CHIBANISHIZEKITEST_H_ */\n\n#endif /*NOGTEST */\n" }, { "alpha_fraction": 0.6417461037635803, "alphanum_fraction": 0.6487706899642944, "avg_line_length": 26.68055534362793, "blob_id": "2f4511d5ad3c822bf583031c8697aed8fb875511", "content_id": "41acdf962b2f21d8181197ccf07bd2d4937267b7", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1993, "license_type": "permissive", "max_line_length": 81, "num_lines": 72, "path": "/networkit/cpp/graph/test/SpanningGTest.cpp", "repo_name": "networkitproject/networkit-mirror", "src_encoding": "UTF-8", "text": "/*\n * SpanningGTest.cpp\n *\n * Created on: 03.09.2015\n * Author: Henning\n */\n\n#include \"SpanningGTest.h\"\n#include \"../KruskalMSF.h\"\n#include \"../RandomSpanningForest.h\"\n#include \"../SpanningForest.h\"\n#include \"../../io/METISGraphReader.h\"\n\nnamespace NetworKit {\n\nTEST_F(SpanningGTest, testKruskalMinSpanningForest) {\n\tMETISGraphReader reader;\n\tstd::vector<std::string> graphs = {\"karate\", \"jazz\", \"celegans_metabolic\"};\n\n\tfor (auto graphname: graphs) {\n\t\tstd::string filename = \"input/\" + graphname + \".graph\";\n\t\tGraph G = reader.read(filename);\n\t\tKruskalMSF msf(G);\n\t\tmsf.run();\n\t\tGraph T = msf.getForest();\n\n\t\t// check that each node has an edge in the spanning tree (if it had one before)\n\t\tT.forNodes([&](node u) {\n\t\t\tEXPECT_TRUE(T.degree(u) > 0 || G.degree(u) == 0);\n\t\t});\n\t}\n}\n\nTEST_F(SpanningGTest, testRandomSpanningTree) {\n\tMETISGraphReader reader;\n\tstd::vector<std::string> graphs = {\"karate\", \"jazz\", \"celegans_metabolic\"};\n\n\tfor (auto graphname: graphs) {\n\t\tstd::string filename = \"input/\" + graphname + \".graph\";\n\t\tGraph G = reader.read(filename);\n\t\tRandomSpanningForest rst(G);\n\t\trst.run();\n\t\tGraph T = rst.getForest();\n\n\t\t// check that each node has an edge in the spanning tree (if it had one before)\n\t\tT.forNodes([&](node u) {\n\t\t\tEXPECT_TRUE(T.degree(u) > 0 || G.degree(u) == 0);\n\t\t});\n\t}\n}\n\nTEST_F(SpanningGTest, testSpanningForest) {\n\tMETISGraphReader reader;\n\tstd::vector<std::string> graphs = {\"karate\", \"jazz\", \"celegans_metabolic\"};\n\n\tfor (auto graphname: graphs) {\n\t\tstd::string filename = \"input/\" + graphname + \".graph\";\n\t\tGraph G = reader.read(filename);\n\t\tSpanningForest msf(G);\n\t\tGraph T = msf.generate();\n\n\t\tINFO(\"tree / graph edges: \", T.numberOfEdges(), \" / \", G.numberOfEdges());\n\n\t\t// check that each node has an edge in the spanning tree (if it had one before)\n\t\tT.forNodes([&](node u) {\n//\t\t\tINFO(\"tree/graph node degree: \", T.degree(u), \" / \", G.degree(u));\n\t\t\tEXPECT_TRUE(T.degree(u) > 0 || G.degree(u) == 0);\n\t\t});\n\t}\n}\n\n} /* namespace NetworKit */\n" }, { "alpha_fraction": 0.6305176019668579, "alphanum_fraction": 0.6318619847297668, "avg_line_length": 24.797687530517578, "blob_id": "6fc3ba7c52207b86366b5b8a40636c376038133e", "content_id": "847c288f79cfaf31eb33a242fe811f8c52a0992b", 
"detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 4463, "license_type": "permissive", "max_line_length": 88, "num_lines": 173, "path": "/networkit/cpp/auxiliary/Log.h", "repo_name": "networkitproject/networkit-mirror", "src_encoding": "UTF-8", "text": "#ifndef LOG_H_\n#define LOG_H_\n\n#include <iostream>\n#include <mutex>\n\n#include \"StringBuilder.h\"\n\n#ifdef NOLOGGING\n\n#define FATAL(...) do{}while(false)\n#define ERROR(...) do{}while(false)\n#define WARN(...) do{}while(false)\n#define INFO(...) do{}while(false)\n#define DEBUG(...) do{}while(false)\n#define TRACE(...) do{}while(false)\n\n#define FATALF(...) do{}while(false)\n#define ERRORF(...) do{}while(false)\n#define WARNF(...) do{}while(false)\n#define INFOF(...) do{}while(false)\n#define DEBUGF(...) do{}while(false)\n#define TRACEF(...) do{}while(false)\n\n#define TRACEPOINT do{}while(false)\n\n#else // NOLOGGING\n\n#define LOG_LEVEL_FATAL 0\n#define LOG_LEVEL_ERROR 1\n#define LOG_LEVEL_WARN 2\n#define LOG_LEVEL_INFO 3\n#define LOG_LEVEL_DEBUG 4\n#define LOG_LEVEL_TRACE 5\n\n\n#if !defined(LOG_LEVEL)\n#define LOG_LEVEL LOG_LEVEL_TRACE\n#endif\n\n#if LOG_LEVEL >= LOG_LEVEL_FATAL\n#define FATAL(...) ::Aux::Log::log({__FILE__, __PRETTY_FUNCTION__, __LINE__},\\\n\t\t::Aux::Log::LogLevel::fatal, __VA_ARGS__)\n#define FATALF(...) ::Aux::Log::logF({__FILE__, __PRETTY_FUNCTION__, __LINE__},\\\n\t\t::Aux::Log::LogLevel::fatal, __VA_ARGS__)\n#else\n#define FATAL(...) do{}while(false)\n#define FATALF(...) do{}while(false)\n#endif\n\n#if LOG_LEVEL >= LOG_LEVEL_ERROR\n#define ERROR(...) ::Aux::Log::log({__FILE__, __PRETTY_FUNCTION__, __LINE__},\\\n\t\t::Aux::Log::LogLevel::error, __VA_ARGS__)\n#define ERRORF(...) ::Aux::Log::logF({__FILE__, __PRETTY_FUNCTION__, __LINE__},\\\n\t\t::Aux::Log::LogLevel::error, __VA_ARGS__)\n#else\n#define ERROR(...) do{}while(false)\n#define ERRORF(...) do{}while(false)\n#endif\n\n#if LOG_LEVEL >= LOG_LEVEL_WARN\n#define WARN(...) ::Aux::Log::log({__FILE__, __PRETTY_FUNCTION__, __LINE__},\\\n\t\t::Aux::Log::LogLevel::warn, __VA_ARGS__)\n#define WARNF(...) ::Aux::Log::logF({__FILE__, __PRETTY_FUNCTION__, __LINE__},\\\n\t\t::Aux::Log::LogLevel::warn, __VA_ARGS__)\n#else\n#define WARN(...) do{}while(false)\n#define WARNF(...) do{}while(false)\n#endif\n\n#if LOG_LEVEL >= LOG_LEVEL_INFO\n#define INFO(...) ::Aux::Log::log({__FILE__, __PRETTY_FUNCTION__, __LINE__},\\\n\t\t::Aux::Log::LogLevel::info, __VA_ARGS__)\n#define INFOF(...) ::Aux::Log::logF({__FILE__, __PRETTY_FUNCTION__, __LINE__},\\\n\t\t::Aux::Log::LogLevel::info, __VA_ARGS__)\n#else\n#define INFO(...) do{}while(false)\n#define INFOF(...) do{}while(false)\n#endif\n\n#if LOG_LEVEL >= LOG_LEVEL_DEBUG\n#define DEBUG(...) ::Aux::Log::log({__FILE__, __PRETTY_FUNCTION__, __LINE__},\\\n\t\t::Aux::Log::LogLevel::debug, __VA_ARGS__)\n#define DEBUGF(...) ::Aux::Log::logF({__FILE__, __PRETTY_FUNCTION__, __LINE__},\\\n\t\t::Aux::Log::LogLevel::debug, __VA_ARGS__)\n#else\n#define DEBUG(...) do{}while(false)\n#define DEBUGF(...) do{}while(false)\n#endif\n\n#if LOG_LEVEL >= LOG_LEVEL_TRACE\n#define TRACE(...) ::Aux::Log::log({__FILE__, __PRETTY_FUNCTION__, __LINE__},\\\n\t\t::Aux::Log::LogLevel::trace, __VA_ARGS__)\n#define TRACEF(...) ::Aux::Log::logF({__FILE__, __PRETTY_FUNCTION__, __LINE__},\\\n\t\t::Aux::Log::LogLevel::trace, __VA_ARGS__)\n#define TRACEPOINT ::Aux::Log::log({__FILE__, __PRETTY_FUNCTION__, __LINE__},\\\n\t\t::Aux::Log::LogLevel::trace, \"tracepoint\")\n#else\n#define TRACE(...) 
do{}while(false)\n#define TRACEF(...) do{}while(false)\n#define TRACEPOINT do{}while(false)\n#endif\n\n#endif // NOLOGGING\n\nnamespace Aux { namespace Log {\n\nstruct Location {\n\tconst char* file;\n\tconst char* function;\n\tconst int line;\n};\n\nenum class LogLevel {\n\ttrace,\n\tdebug,\n\tinfo,\n\twarn,\n\terror,\n\tfatal\n};\n\n/**\n * Accept loglevel as string and set.\n * @param logLevel as string\n */\nvoid setLogLevel(std::string logLevel);\n\n/**\n * @return current loglevel as string\n */\nstd::string getLogLevel();\n\nnamespace Settings {\n\nLogLevel getLogLevel();\nvoid setLogLevel(LogLevel p = LogLevel::info);\n\nvoid setPrintTime(bool b);\nbool getPrintTime();\n\nvoid setPrintLocation(bool b);\nbool getPrintLocation();\n\nvoid setLogfile(const std::string& filename);\n}\n\nnamespace Impl {\nvoid log(const Location& loc, LogLevel p, const std::string msg);\n} //namespace impl\n\ntemplate<typename...T>\nvoid log(const Location& loc, LogLevel p, const T&...args) {\n\tif(p >= Settings::getLogLevel()) {\n\t\tstd::stringstream stream;\n\t\tprintToStream(stream, args...);\n\t\tImpl::log(loc, p, stream.str());\n\t}\n}\n\ntemplate<typename...T>\nvoid logF(const Location& loc, LogLevel p, const std::string& format, const T&...args) {\n\tif(p >= Settings::getLogLevel()) {\n\t\tstd::stringstream stream;\n\t\tprintToStreamF(stream, format, args...);\n\t\tImpl::log(loc, p, stream.str());\n\t}\n}\n\n\n}} // namespace Aux::Log\n\n#endif\n" }, { "alpha_fraction": 0.6988150477409363, "alphanum_fraction": 0.7041730880737305, "avg_line_length": 30.105770111083984, "blob_id": "46a1f74c7e87c191d7aabdd1b79801b0bcb12a55", "content_id": "69e3d55b800dcea5f6f9eb4d170f260fe13c36f7", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9705, "license_type": "permissive", "max_line_length": 182, "num_lines": 312, "path": "/networkit/graphio.py", "repo_name": "networkitproject/networkit-mirror", "src_encoding": "UTF-8", "text": "# extension imports\nfrom _NetworKit import (METISGraphReader, METISGraphWriter, DotGraphWriter, EdgeListWriter, \\\n\t\t\t\t\t\t GMLGraphWriter, LineFileReader, SNAPGraphWriter, DGSWriter, GraphToolBinaryWriter, GraphToolBinaryReader, \\\n\t\t\t\t\t\t DGSStreamParser, GraphUpdater, SNAPEdgeListPartitionReader, SNAPGraphReader, EdgeListReader, CoverReader, CoverWriter, EdgeListCoverReader, KONECTGraphReader, GMLGraphReader)\nfrom _NetworKit import Graph as __Graph\n# local imports\nfrom .GraphMLIO import GraphMLReader, GraphMLWriter\nfrom .GEXFIO import GEXFReader, GEXFWriter\nfrom . import algebraic\n\n# external imports\nimport os\nimport logging\ntry:\n\timport numpy\nexcept ImportError:\n\tprint(\"module 'numpy' not available - some functionality will be restricted\")\ntry:\n\timport scipy.io\nexcept ImportError:\n\tprint(\"module 'scipy' not available - some functionality will be restricted\")\nimport fnmatch\n\ntry:\n\tfrom enum import Enum\n\n\tclass __AutoNumber(Enum):\n\t\tdef __new__(cls):\n\t\t\tvalue = len(cls.__members__) + 1\n\t\t\tobj = object.__new__(cls)\n\t\t\tobj._value_ = value\n\t\t\treturn obj\n\n\n\tclass Format(__AutoNumber):\n\t\t\"\"\" Simple enumeration class to list supported file types. 
Currently supported\n\t\tfile types: SNAP, EdgeListSpaceZero, EdgeListSpaceOne, EdgeListTabZero, EdgeListTabOne,\n\t\tMETIS, GraphML, GEXF, GML, EdgeListCommaOne, GraphViz, DOT, EdgeList, LFR, KONECT, GraphToolBinary\"\"\"\n\t\tSNAP = ()\n\t\tEdgeListSpaceZero = ()\n\t\tEdgeListSpaceOne = ()\n\t\tEdgeListTabZero = ()\n\t\tEdgeListTabOne = ()\n\t\tMETIS = ()\n\t\tGraphML = ()\n\t\tGEXF = ()\n\t\tGML = ()\n\t\tEdgeListCommaOne = ()\n\t\tGraphViz = ()\n\t\tDOT = ()\n\t\tEdgeList = ()\n\t\tLFR = ()\n\t\tKONECT = ()\n\t\tGraphToolBinary = ()\n\t\tMAT = ()\n\nexcept ImportError:\n\tprint(\"Update to Python >=3.4 recommended - support for < 3.4 may be discontinued in the future\")\n\tclass Format:\n\t\tSNAP = \"snap\"\n\t\tEdgeListTabOne = \"edgelist-t1\"\n\t\tEdgeListTabZero = \"edgelist-t0\"\n\t\tEdgeListSpaceOne = \"edgelist-s1\"\n\t\tEdgeListSpaceZero = \"edgelist-s0\"\n\t\tMETIS = \"metis\"\n\t\tGraphML = \"graphml\"\n\t\tGEXF = \"gexf\"\n\t\tGML = \"gml\"\n\t\tEdgeListCommaOne = \"edgelist-cs1\"\n\t\tGraphViz = \"dot\"\n\t\tDOT = \"dot\"\n\t\tEdgeList = \"edgelist\"\n\t\tLFR = \"edgelist-t1\"\n\t\tKONECT = \"konect\"\n\t\tGraphToolBinary = \"gtbin\"\n\t\tMAT = \"mat\"\n\n\n\n\n\n# reading\n\ndef getReader(fileformat, **kwargs):\n\t# define your [edgelist] reader here:\n\treaders =\t{\n\t\t\tFormat.METIS:\t\t\tMETISGraphReader(),\n\t\t\tFormat.GraphML:\t\t\tGraphMLReader(),\n\t\t\tFormat.GEXF:\t\t\tGEXFReader(),\n\t\t\tFormat.SNAP:\t\t\tEdgeListReader('\\t',0,'#',False),\n\t\t\tFormat.EdgeListCommaOne:\tEdgeListReader(',',1,),\n\t\t\tFormat.EdgeListSpaceOne:\tEdgeListReader(' ',1),\n\t\t\tFormat.EdgeListSpaceZero:\tEdgeListReader(' ',0),\n\t\t\tFormat.EdgeListTabOne:\t\tEdgeListReader('\\t',1),\n\t\t\tFormat.EdgeListTabZero:\t\tEdgeListReader('\\t',0),\n\t\t\tFormat.LFR:\t\t\tEdgeListReader('\\t',1),\n\t\t\tFormat.KONECT:\t\t\tKONECTGraphReader(' '),\n\t\t\tFormat.GML:\t\t\tGMLGraphReader(),\n\t\t\tFormat.GraphToolBinary:\t\tGraphToolBinaryReader(),\n\t\t\tFormat.MAT:\t\t\tMatReader()\n\t\t\t}\n\n\ttry:\n\t\t# special case for custom Edge Lists\n\t\tif fileformat == Format.EdgeList:\n\t\t\tif kwargs[\"continuous\"] == False:\n\t\t\t\tkwargs[\"firstNode\"] = 0\n\t\t\treader = EdgeListReader(**kwargs)\n\t\telse:\n\t\t\treader = readers[fileformat]#(**kwargs)\n\texcept (Exception, KeyError):\n\t\traise Exception(\"unrecognized format/format not supported as input: {0}\".format(fileformat))\n\treturn reader\n\n\ndef readGraph(path, fileformat, **kwargs):\n\t\"\"\" Read graph file in various formats and return a NetworKit::Graph\n\t Parameters:\n\t\t- fileformat: An element of the Format enumeration.
Currently supported file types:\n\t\tSNAP, EdgeListSpaceZero, EdgeListSpaceOne, EdgeListTabZero, EdgeListTabOne, METIS,\n\t\tGraphML, GEXF, GML, EdgeListCommaOne, GraphViz, DOT, EdgeList, LFR, KONECT, GraphToolBinary\n\t\t- **kwargs: in case of a custom edge list, provide the defining parameters as follows:\n\t\t\t\"separator=CHAR, firstNode=NODE, commentPrefix=STRING, continuous=BOOL\"\n\t\t\tcommentPrefix and continuous are optional\n\t\"\"\"\n\treader = getReader(fileformat,**kwargs)\n\n\n\tif (\"~\" in path):\n\t\tpath = os.path.expanduser(path)\n\t\tprint(\"path expanded to: {0}\".format(path))\n\tif not os.path.isfile(path):\n\t\traise IOError(\"{0} is not a file\".format(path))\n\telse:\n\t\twith open(path, \"r\") as file: # catch a wrong path before it crashes the interpreter\n\t\t\ttry:\n\t\t\t\tG = reader.read(path)\n\t\t\t\tG.setName(os.path.basename(path).split(\".\")[0])\t# set name of graph to name of file\n\t\t\t\treturn G\n\t\t\texcept Exception as e:\n\t\t\t\traise IOError(\"{0} is not a valid {1} file: {2}\".format(path,fileformat,e))\n\treturn None\n\ndef readGraphs(dirPath, pattern, fileformat, some=None, exclude=None, **kwargs):\n\t\"\"\"\n\tRead all graph files contained in a directory whose filename matches the pattern, return a dictionary of name to Graph object.\n Parameters:\n\t- pattern: unix-style string pattern\n\t- fileformat: An element of the Format enumeration\n\t- some: restrict number of graphs to be read\n\t- **kwargs: in case of a custom edge list, provide the defining parameters as follows:\n\t\t\"separator=CHAR, firstNode=NODE, commentPrefix=STRING, continuous=BOOL\"\n\t\tcommentPrefix and continuous are optional\n\t\"\"\"\n\tgraphs = {}\n\tfor root, dirs, files in os.walk(dirPath):\n\t\tfor file in files:\n\t\t\tif fnmatch.fnmatch(file, pattern):\n\t\t\t\tif (exclude is None) or (not fnmatch.fnmatch(file, exclude)):\n\t\t\t\t\tG = readGraph(os.path.join(root, file), fileformat, **kwargs)\n\t\t\t\t\tgraphs[G.getName()] = G\n\t\t\t\t\tif some:\n\t\t\t\t\t\tif len(graphs) == some:\n\t\t\t\t\t\t\treturn graphs\n\treturn graphs\n\n\nclass MatReader:\n\tdef __init__(self, key = \"G\"):\n\t\tself.key = key\n\n\tdef read(self, path):\n\t\treturn readMat(path, self.key)\n\ndef readMat(path, key=\"G\"):\n\t\"\"\" Reads a Graph from a matlab object file containing an adjacency matrix and returns a NetworKit::Graph\n\t\tParameters:\n\t\t- key: The key of the adjacency matrix in the matlab object file (default: G)\"\"\"\n\tmatlabObject = scipy.io.loadmat(path)\n\t# result is a dictionary of variable names and objects, representing the matlab object\n\tif key in matlabObject:\n\t\tA = matlabObject[key]\n\telse:\n\t\traise Exception(\"Key {0} not found in the matlab object file\".format(key))\n\t(n, n2) = A.shape\n\tif n != n2:\n\t\traise Exception(\"this ({0}x{1}) matrix is not square\".format(n, n2))\n#\tif not numpy.array_equal(A, A.transpose): # FIXME this is slow and doesn't work as expected, seems to be False for valid inputs\n#\t\tlogging.warning(\"the adjacency matrix is not symmetric\")\n\tG = __Graph(n)\n\tnz = A.nonzero()\n\tfor (u,v) in zip(nz[0], nz[1]):\n\t\tif not G.hasEdge(u, v):\n\t\t\tG.addEdge(u, v)\n\treturn G\n\nclass MatWriter:\n\tdef __init__(self, key=\"G\"):\n\t\tself.key = key\n\n\tdef write(self, G, path, key=\"G\"):\n\t\twriteMat(G, path, key)\n\ndef writeMat(G, path, key=\"G\"):\n\t\"\"\" Writes a NetworKit::Graph to a Matlab object file.\n\t\tParameters:\n\t\t- G: The graph\n\t\t- path: Path of the matlab file\n\t\t- key: Dictionary
Key\n\t\"\"\"\n\tmatrix = algebraic.adjacencyMatrix(G, matrixType='sparse')\n\tscipy.io.savemat(path, {key : matrix})\n\n\n# writing\ndef getWriter(fileformat, **kwargs):\n\twriters =\t{\n\t\t\tFormat.METIS:\t\t\tMETISGraphWriter(),\n\t\t\tFormat.GraphML:\t\t\tGraphMLWriter(),\n\t\t\tFormat.GEXF:\t\t\tGEXFWriter(),\n#\t\t\tFormat.SNAP:\t\t\tEdgeListWriter('\\t',0,'#',False),\n\t\t\tFormat.EdgeListCommaOne:\tEdgeListWriter(',',1,),\n\t\t\tFormat.EdgeListSpaceOne:\tEdgeListWriter(' ',1),\n\t\t\tFormat.EdgeListSpaceZero:\tEdgeListWriter(' ',0),\n\t\t\tFormat.EdgeListTabOne:\t\tEdgeListWriter('\\t',1),\n\t\t\tFormat.EdgeListTabZero:\t\tEdgeListWriter('\\t',0),\n\t\t\tFormat.GraphViz:\t\tDotGraphWriter(),\n\t\t\tFormat.DOT:\t\t\tDotGraphWriter(),\n\t\t\tFormat.GML:\t\t\tGMLGraphWriter(),\n\t\t\tFormat.LFR:\t\t\tEdgeListWriter('\\t',1),\n\t\t\tFormat.GraphToolBinary:\t\tGraphToolBinaryWriter()\n\t\t\t}\n\ttry:\n\t\t# special case for custom Edge Lists\n\t\tif fileformat == Format.EdgeList:\n\t\t\twriter = EdgeListWriter(kwargs['separator'],kwargs['firstNode'])\n\t\telse:\n\t\t\twriter = writers[fileformat]#(**kwargs)\n\texcept KeyError:\n\t\traise Exception(\"format {0} currently not supported\".format(fileformat))\n\treturn writer\n\ndef writeGraph(G, path, fileformat, **kwargs):\n\t\"\"\" Write graph to various output formats.\n\n\tParameters:\n\t- G:\t\t\ta graph\n\t- path: \t\toutput path\n\t- fileformat: \tan element of the Format enumeration\n\n\t\"\"\"\n\n\tdirname = os.path.dirname(os.path.realpath(path))\n\t# the given file path does not exist yet\n\tif not os.path.isfile(path):\n\t\t# check write permissions on the directory\n\t\tif not os.access(dirname, os.W_OK):\n\t\t\t# we may not write on this directory, raise Error\n\t\t\traise IOError(\"No permission to write\")\n\t\t# else everything is alright\n\telse:\n\t\t# the given path points to a file\n\t\tif not os.access(path, os.W_OK):\n\t\t\traise IOError(\"No permission to write\")\n\t\telse:\n\t\t\tlogging.warning(\"overwriting existing file\")\n\twriter = getWriter(fileformat, **kwargs)\n\twriter.write(G, path)\n\tlogging.info(\"wrote graph {0} to file {1}\".format(G, path))\n\n\nclass GraphConverter:\n\n\tdef __init__(self, reader, writer):\n\t\tself.reader = reader\n\t\tself.writer = writer\n\n\tdef convert(self, inPath, outPath):\n\t\tG = self.reader.read(inPath)\n\t\tself.writer.write(G, outPath)\n\n\tdef __str__(self):\n\t\treturn \"GraphConverter: {0} => {1}\".format(self.reader, self.writer)\n\ndef getConverter(fromFormat, toFormat):\n\treader = getReader(fromFormat)\n\twriter = getWriter(toFormat)\n\treturn GraphConverter(reader, writer)\n\n\ndef convertGraph(fromFormat, toFormat, fromPath, toPath=None):\n\tconverter = getConverter(fromFormat, toFormat)\n\tif toPath is None:\n\t\ttoPath = \"{0}.{1}.graph\".format(fromPath.split(\".\")[0], toFormat)\n\tconverter.convert(fromPath, toPath)\n\tprint(\"converted {0} to {1}\".format(fromPath, toPath))\n\n\n\n# dynamic\n\ndef readStream(path, mapped=True, baseIndex=0):\n\t\"\"\"\n\t\tRead a graph event stream from a file.\n\t\"\"\"\n\treturn DGSStreamParser(path, mapped, baseIndex).getStream()\n\ndef writeStream(stream, path):\n\t\"\"\"\n\t\tWrite a graph event stream to a file.\n\t\"\"\"\n\tDGSWriter().write(stream, path)\n" }, { "alpha_fraction": 0.6380518674850464, "alphanum_fraction": 0.6436580419540405, "avg_line_length": 25.183486938476562, "blob_id": "13eac50c9a07b837917e4cdc0f541f6aecc1110f", "content_id": "b5b1705609592c06c050aa9fd9d30edaa27b4458", "detected_licenses": [
"MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 2854, "license_type": "permissive", "max_line_length": 135, "num_lines": 109, "path": "/networkit/cpp/scd/PageRankNibble.cpp", "repo_name": "networkitproject/networkit-mirror", "src_encoding": "UTF-8", "text": "/*\n * PageRankNibble.cpp\n *\n * Created on: 26.02.2014\n * Author: Henning\n */\n\n#include \"PageRankNibble.h\"\n#include \"ApproximatePageRank.h\"\n#include \"../community/Conductance.h\"\n#include \"../auxiliary/Parallel.h\"\n#include <cmath>\n#include <vector>\n\nnamespace NetworKit {\n\nPageRankNibble::PageRankNibble(Graph& g, double alpha, double epsilon): SelectiveCommunityDetector(g), alpha(alpha), epsilon(epsilon) {\n\tassert(!g.isWeighted());\n}\n\nPageRankNibble::~PageRankNibble() {\n\n}\n\n\nstd::set<node> PageRankNibble::bestSweepSet(std::vector<std::pair<node, double>>& pr) {\n\tTRACE(\"Support size: \", pr.size());\n\n\n\t// order vertices\n\tTRACE(\"Before sorting\");\n\tauto comp([&](const std::pair<node, double>& a, const std::pair<node, double>& b) {\n\t\treturn (a.second / G.degree(a.first)) > (b.second / G.degree(b.first));\n\t});\n\tAux::Parallel::sort(pr.begin(), pr.end(), comp);\n\tTRACE(\"After sorting\");\n\n\tfor (std::vector<std::pair<node, double>>::iterator it = pr.begin(); it != pr.end(); it++) {\n\t\tTRACE(\"(\", it->first, \", \", it->second, \")\");\n\t}\n\n\n\t// find best sweep set w.r.t. conductance\n\tdouble bestCond = std::numeric_limits<double>::max();\n\tdouble cut = 0.0;\n\tdouble volume = 0.0;\n\tindex bestSweepSetIndex = 0;\n\tstd::unordered_map<node, bool> withinSweepSet;\n\tstd::vector<node> currentSweepSet;\n\n\tfor (std::vector<std::pair<node, double>>::iterator it = pr.begin(); it != pr.end(); it++) {\n\t\t// update sweep set\n\t\tnode v = it->first;\n\t\tG.forNeighborsOf(v, [&](node neigh) {\n\t\t\tif (withinSweepSet.find(neigh) == withinSweepSet.end()) {\n\t\t\t\tcut++;\n\t\t\t} else {\n\t\t\t\tcut--;\n\t\t\t}\n\t\t});\n\t\tvolume += G.volume(v);\n\t\tcurrentSweepSet.push_back(v);\n\t\twithinSweepSet[v] = true;\n\n\t\t// compute conductance\n\t\tdouble cond = cut / fmin(volume, 2 * G.numberOfEdges() - volume);\n\n\t\tstd::stringstream debug;\n\n\t\tdebug << \"Current vertex: \" << v << \"; Current sweep set conductance: \" << cond << std::endl;\n\t\tdebug << \"Current cut weight: \" << cut << \"; Current volume: \" << volume << std::endl;\n\t\tdebug << \"Total graph volume: \" << 2 * G.numberOfEdges() << std::endl;\n\n\t\tTRACE(debug.str());\n\n\t\tif (cond < bestCond) {\n\t\t\tbestCond = cond;\n\t\t\tbestSweepSetIndex = currentSweepSet.size();\n\t\t}\n\t}\n\n\tstd::set<node> bestSweepSet;\n\n\tfor (index j = 0; j < bestSweepSetIndex; j++) {\n\t\tbestSweepSet.insert(currentSweepSet[j]);\n\t}\n\n\treturn bestSweepSet;\n}\n\n\nstd::set<node> PageRankNibble::expandSeed(node seed) {\n\tDEBUG(\"APR(G, \", alpha, \", \", epsilon, \")\");\n\tApproximatePageRank apr(G, alpha, epsilon);\n\tstd::vector<std::pair<node, double>> pr = apr.run(seed);\n\tstd::set<node> s = bestSweepSet(pr);\n\treturn s;\n}\n\nstd::map<node, std::set<node> > PageRankNibble::run(std::set<unsigned int>& seeds) {\n std::map<node, std::set<node> > result;\n\tfor (auto seed : seeds) {\n\t\tauto community = expandSeed(seed);\n\t\tresult[seed] = community;\n\t}\n return result;\n}\n\n} /* namespace NetworKit */\n" }, { "alpha_fraction": 0.6937075257301331, "alphanum_fraction": 0.6956807971000671, "avg_line_length": 27.506250381469727, "blob_id": 
"c7440ea32f6df76617560ded79d262f1563adbec", "content_id": "acd18c439b5f125b1494540bf658b8aef1435449", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 4561, "license_type": "permissive", "max_line_length": 117, "num_lines": 160, "path": "/networkit/cpp/graph/SSSP.h", "repo_name": "networkitproject/networkit-mirror", "src_encoding": "UTF-8", "text": "/*\n * SSSP.h\n *\n * Created on: 15.04.2014\n * Author: cls\n */\n\n#ifndef SSSP_H_\n#define SSSP_H_\n\n#include <set>\n#include <stack>\n\n#include \"Graph.h\"\n#include \"../base/Algorithm.h\"\n\n\nnamespace NetworKit {\n\n/**\n * @ingroup graph\n * Abstract base class for single-source shortest path algorithms.\n */\nclass SSSP: public Algorithm {\n\npublic:\n\n\t/**\n\t * Creates the SSSP class for @a G and source @a s.\n\t *\n\t * @param G The graph.\n\t * @param s The source node.\n\t */\n\tSSSP(const Graph& G, node s, bool storePaths=true, bool storeStack=false, node target = none);\n\n\tvirtual ~SSSP() = default;\n\n\t/** Computes the shortest paths from the source to all other nodes. */\n\tvirtual void run() = 0;\n\n\t/**\n\t * Returns a vector of weighted distances from the source node, i.e. the\n \t * length of the shortest path from the source node to any other node.\n \t *\n \t * @param moveOut If set to true, the container will be moved out of the class instead of copying it; default=true.\n \t * @return The weighted distances from the source node to any other node in the graph.\n\t */\n\tvirtual std::vector<edgeweight> getDistances(bool moveOut=true);\n\n\t/**\n\t * Returns the distance from the source node to @a t.\n\t * @param t Target node.\n\t * @return The distance from source to target node @a t.\n\t */\n\tedgeweight distance(node t) const;\n\n\t/**\n\t * Returns the number of shortest paths between the source node and @a t.\n\t * @param t Target node.\n\t * @return The number of shortest paths between source and @a t.\n\t */\n\tbigfloat numberOfPaths(node t) const;\n\n\t/**\n\t * Returns the number of shortest paths between the source node and @a t\n\t * as a double value. 
Workaround for Cython\n\t * @param t Target node.\n\t * @return The number of shortest paths between source and @a t.\n\t */\n\tdouble _numberOfPaths(node t) const;\n\n\t/**\n\t * Returns the predecessor nodes of @a t on all shortest paths from source to @a t.\n\t * @param t Target node.\n\t * @return The predecessors of @a t on all shortest paths from source to @a t.\n\t */\n\tstd::vector<node> getPredecessors(node t) const;\n\n\t/**\n\t * Returns a shortest path from source to @a t and an empty path if source and @a t are not connected.\n\t *\n\t * @param t Target node.\n\t * @param forward If @c true (default) the path is directed from source to @a t, otherwise the path is reversed.\n\t * @return A shortest path from source to @a t or an empty path.\n\t */\n\tvirtual std::vector<node> getPath(node t, bool forward=true) const;\n\n\t/**\n\t * Returns all shortest paths from source to @a t and an empty set if source and @a t are not connected.\n\t *\n\t * @param t Target node.\n\t * @param forward If @c true (default) the path is directed from source to @a t, otherwise the path is reversed.\n\t * @return All shortest paths from source node to target node @a t.\n\t */\n\tvirtual std::set<std::vector<node> > getPaths(node t, bool forward=true) const;\n\n\t/* Returns the number of shortest paths to node t.*/\n\tbigfloat getNumberOfPaths(node t) const;\n\n\t/**\n\t* Returns a stack of nodes ordered in decreasing distance from the source\n\t*\n\t* @param moveOut If set to true, the container will be moved out of the class instead of copying it; default=true.\n\t* @return stack of nodes\n\t*/\n\tvirtual std::vector<node> getStack(bool moveOut=true);\n\nprotected:\n\n\tconst Graph& G;\n\tconst node source;\n\tnode target;\n\tstd::vector<edgeweight> distances;\n\tstd::vector<std::vector<node> > previous; // predecessors on shortest path\n\tstd::vector<bigfloat> npaths;\n\n\tstd::vector<node> stack;\n\n\tbool storePaths;\t\t//!< if true, paths are reconstructable and the number of paths is stored\n\tbool storeStack;\t\t//!< if true, store a stack of nodes ordered in decreasing distance from the source\n};\n\ninline edgeweight SSSP::distance(node t) const {\n\treturn distances[t];\n}\n\ninline bigfloat SSSP::numberOfPaths(node t) const {\n\tif (! storePaths) {\n\t\tthrow std::runtime_error(\"number of paths have not been stored\");\n\t}\n\treturn npaths[t];\n}\n\ninline double SSSP::_numberOfPaths(node t) const {\n\tif (! storePaths) {\n\t\tthrow std::runtime_error(\"number of paths have not been stored\");\n\t}\n\tbigfloat limit = std::numeric_limits<double>::max();\n\tif (npaths[t] > limit) {\n\t\tthrow std::overflow_error(\"number of paths do not fit into a double\");\n\t}\n\tdouble res;\n\tnpaths[t].ToDouble(res);\n\treturn res;\n}\n\ninline std::vector<node> SSSP::getPredecessors(node t) const {\n\tif (! 
storePaths) {\n\t\tthrow std::runtime_error(\"predecessors have not been stored\");\n\t}\n\treturn previous[t];\n}\n\ninline bigfloat SSSP::getNumberOfPaths(node t) const {\n\treturn npaths[t];\n}\n\n} /* namespace NetworKit */\n\n#endif /* SSSP_H_ */\n" }, { "alpha_fraction": 0.6268564462661743, "alphanum_fraction": 0.6417078971862793, "avg_line_length": 24.25, "blob_id": "d1232d53c8f927f1359a19652a2ef14cab17716a", "content_id": "cfb9e6bcabfed8bd2aab71c8c6b6fd07f9896f4d", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1616, "license_type": "permissive", "max_line_length": 116, "num_lines": 64, "path": "/networkit/cpp/scd/ApproximatePageRank.cpp", "repo_name": "networkitproject/networkit-mirror", "src_encoding": "UTF-8", "text": "/*\n * ApproximatePageRank.cpp\n *\n * Created on: 26.02.2014\n * Author: Henning\n */\n\n\n#include <set>\n#include <utility>\n#include \"ApproximatePageRank.h\"\n\nnamespace NetworKit {\n\nApproximatePageRank::ApproximatePageRank(const Graph& g, double alpha_, double epsilon):\n\t\tG(g), alpha(alpha_), oneMinusAlphaOver2((1.0 - alpha) * 0.5), eps(epsilon)\n{\n\n}\n\nvoid ApproximatePageRank::push(node u, std::set<node>& activeNodes)\n{\n\tdouble res = pr_res[u].second;\n\tdouble mass = oneMinusAlphaOver2 * res / G.degree(u);\n\n\tG.forNeighborsOf(u, [&](node v) {\n\t\tif (pr_res.find(v) == pr_res.end()) {\n\t\t\tpr_res[v] = std::pair<double, double>(0.0, 0.0);\n\t\t}\n\t\tpr_res[v] = std::pair<double, double>(pr_res[v].first, pr_res[v].second + mass);\n\t\tif ((pr_res[v].second / G.degree(v)) >= eps) {\n\t\t\tactiveNodes.insert(v);\n\t\t}\n\t});\n\n\tpr_res[u] = std::pair<double, double>(pr_res[u].first + alpha * res, oneMinusAlphaOver2 * res);\n\tif ((pr_res[u].second / G.degree(u)) >= eps) {\n\t\tactiveNodes.insert(u);\n\t}\n}\n\n\nstd::vector<std::pair<node, double>> ApproximatePageRank::run(node seed) {\n\tpr_res[seed] = std::pair<double, double>(0.0, 1.0);\n\tstd::set<node> activeNodes;\n\tactiveNodes.insert(seed);\n\n\twhile (activeNodes.size() > 0) {\n\t\tnode v = (* activeNodes.begin());\n\t\tactiveNodes.erase(v);\n\t\tTRACE(\"queue size: \", activeNodes.size());\n\t\tpush(v, activeNodes);\n\t}\n\n\tstd::vector<std::pair<node, double>> pr;\n\n\tfor (std::unordered_map<node, std::pair<double, double>>::iterator it = pr_res.begin(); it != pr_res.end(); it++) {\n\t\tpr.push_back(std::pair<node, double>(it->first, it->second.first));\n\t}\n\n\treturn pr;\n}\n\n} /* namespace NetworKit */\n" }, { "alpha_fraction": 0.6728280782699585, "alphanum_fraction": 0.6876155138015747, "avg_line_length": 17.03333282470703, "blob_id": "ba8e3e7f17e0945010250aeb9b7102757e0d0540", "content_id": "f72130b78638320ae818ddda00d2c32e7e7977b9", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 541, "license_type": "permissive", "max_line_length": 63, "num_lines": 30, "path": "/networkit/cpp/numerics/LAMG/Level/LevelFinest.h", "repo_name": "networkitproject/networkit-mirror", "src_encoding": "UTF-8", "text": "/*\n * LevelFinest.h\n *\n * Created on: 10.01.2015\n * Author: Michael\n */\n\n#ifndef LEVELFINEST_H_\n#define LEVELFINEST_H_\n\n#include \"Level.h\"\n\nnamespace NetworKit {\n\n/**\n * @ingroup numerics\n */\nclass LevelFinest : public Level {\npublic:\n\tLevelFinest();\n\tLevelFinest(const CSRMatrix &A);\n\n\tvoid coarseType(const Vector &xf, Vector &xc) const override;\n\tvoid restrict(const Vector &bf, Vector &bc) const override;\n\tvoid 
interpolate(const Vector &xc, Vector &xf) const override;\n};\n\n} /* namespace NetworKit */\n\n#endif /* LEVELFINEST_H_ */\n" }, { "alpha_fraction": 0.6866359710693359, "alphanum_fraction": 0.688504159450531, "avg_line_length": 28.090579986572266, "blob_id": "cf57301f0dd0fe6ee98438746fbbbc9f2ff13673", "content_id": "745e339f60080252c9e796fec6a0e48083526e7a", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 8029, "license_type": "permissive", "max_line_length": 136, "num_lines": 276, "path": "/networkit/cpp/algebraic/DenseMatrix.h", "repo_name": "networkitproject/networkit-mirror", "src_encoding": "UTF-8", "text": "/*\n * DenseMatrix.h\n *\n * Created on: Nov 25, 2015\n * Author: Michael Wegner ([email protected])\n */\n\n#ifndef NETWORKIT_CPP_ALGEBRAIC_DENSEMATRIX_H_\n#define NETWORKIT_CPP_ALGEBRAIC_DENSEMATRIX_H_\n\n#include \"../Globals.h\"\n#include \"Vector.h\"\n\n#include <cassert>\n#include <vector>\n\nnamespace NetworKit {\n\n/**\n * Represents a dense matrix. Use this matrix to run LU decompositions and LU solves.\n * Note that most matrices are rather sparse s.t. CSRMatrix might be a better representation.\n */\nclass DenseMatrix {\nprivate:\n\tcount nRows;\n\tcount nCols;\n\tstd::vector<double> entries;\n\npublic:\n\t/** Default constructor */\n\tDenseMatrix();\n\n\t/**\n\t * Constructs an instance of DenseMatrix given the number of rows (@a nRows) and the number of columns (@a nCols) and its\n\t * values (@a entries).\n\t * @param nRows Number of rows.\n\t * @param nCols Number of columns.\n\t * @param entries Entries of the matrix.\n\t * @note The size of the @a entries vector should be equal to @a nRows * @a nCols.\n\t */\n\tDenseMatrix(const count nRows, const count nCols, const std::vector<double> &entries);\n\n\t/** Default destructor */\n\tvirtual ~DenseMatrix() = default;\n\n\t/** Default copy constructor */\n\tDenseMatrix (const DenseMatrix &other) = default;\n\n\t/** Default move constructor */\n\tDenseMatrix (DenseMatrix &&other) = default;\n\n\t/** Default move assignment operator */\n\tDenseMatrix& operator=(DenseMatrix &&other) = default;\n\n\t/** Default copy assignment operator */\n\tDenseMatrix& operator=(const DenseMatrix &other) = default;\n\n\t/**\n\t * @return Number of rows.\n\t */\n\tinline count numberOfRows() const {\n\t\treturn nRows;\n\t}\n\n\t/**\n\t * @return Number of columns.\n\t */\n\tinline count numberOfColumns() const {\n\t\treturn nCols;\n\t}\n\n\t/**\n\t * @return Value at matrix position (i,j).\n\t */\n\tdouble operator()(const index i, const index j) const;\n\n\t/**\n\t * Set the matrix at position (@a i, @a j) to @a value.\n\t */\n\tvoid setValue(const index i, const index j, const double value);\n\n\n\t/**\n\t * @return Row @a i of this matrix as vector.\n\t */\n\tVector row(const index i) const;\n\n\t/**\n\t * @return Column @a j of this matrix as vector.\n\t */\n\tVector column(const index j) const;\n\n\t/**\n\t * @return The main diagonal of this matrix.\n\t */\n\tVector diagonal() const;\n\n\t/**\n\t * Adds this matrix to @a other and returns the result.\n\t * @return The sum of this matrix and @a other.\n\t */\n\tDenseMatrix operator+(const DenseMatrix &other) const;\n\n\t/**\n\t * Adds @a other to this matrix.\n\t * @return Reference to this matrix.\n\t */\n\tDenseMatrix& operator+=(const DenseMatrix &other);\n\n\t/**\n\t * Subtracts @a other from this matrix and returns the result.\n\t * @return The difference of this matrix and @a other.\n\t *\n\t */\n\tDenseMatrix
operator-(const DenseMatrix &other) const;\n\n\t/**\n\t * Subtracts @a other from this matrix.\n\t * @return Reference to this matrix.\n\t */\n\tDenseMatrix& operator-=(const DenseMatrix &other);\n\n\t/**\n\t * Multiplies this matrix with a scalar specified in @a scalar and returns the result.\n\t * @return The result of multiplying this matrix with @a scalar.\n\t */\n\tDenseMatrix operator*(const double &scalar) const;\n\n\t/**\n\t * Multiplies this matrix with a scalar specified in @a scalar.\n\t * @return Reference to this matrix.\n\t */\n\tDenseMatrix& operator*=(const double &scalar);\n\n\t/**\n\t * Multiplies this matrix with @a vector and returns the result.\n\t * @return The result of multiplying this matrix with @a vector.\n\t */\n\tVector operator*(const Vector &vector) const;\n\n\t/**\n\t * Multiplies this matrix with @a other and returns the result in a new matrix.\n\t * @return The result of multiplying this matrix with @a other.\n\t */\n\tDenseMatrix operator*(const DenseMatrix &other) const;\n\n\t/**\n\t * Divides this matrix by a divisor specified in @a divisor and returns the result in a new matrix.\n\t * @return The result of dividing this matrix by @a divisor.\n\t */\n\tDenseMatrix operator/(const double &divisor) const;\n\n\t/**\n\t * Divides this matrix by a divisor specified in @a divisor.\n\t * @return Reference to this matrix.\n\t */\n\tDenseMatrix& operator/=(const double &divisor);\n\n\t/**\n\t * Decomposes the given @a matrix into lower L and upper U matrix (in-place).\n\t * @param matrix The matrix to decompose into LU.\n\t */\n\tstatic void LUDecomposition(DenseMatrix &matrix);\n\n\t/**\n\t * Computes the solution vector x to the system @a LU * x = @a b where @a LU is a matrix decomposed into L and U.\n\t * @param LU Matrix decomposed into lower L and upper U matrix.\n\t * @param b Right-hand side.\n\t * @return Solution vector x to the linear equation system LU * x = b.\n\t */\n\tstatic Vector LUSolve(const DenseMatrix &LU, const Vector &b);\n\n\t/**\n\t * Computes @a A @a binaryOp @a B on the elements of matrix @a A and matrix @a B.\n\t * @param A\n\t * @param B\n\t * @param binaryOp Function handling (double, double) -> double\n\t * @return @a A @a binaryOp @a B.\n\t * @note @a A and @a B must have the same dimensions.\n\t */\n\ttemplate<typename L> static DenseMatrix binaryOperator(const DenseMatrix &A, const DenseMatrix &B, L binaryOp);\n\n\t/**\n\t * Iterate over all elements of row @a i in the matrix and call handler(index column, double value)\n\t */\n\ttemplate<typename L> void forElementsInRow(index i, L handle) const;\n\n\t/**\n\t * Iterate in parallel over all elements of row @a i in the matrix and call handler(index column, double value)\n\t */\n\ttemplate<typename L> void parallelForElementsInRow(index i, L handle) const;\n\n\t/**\n\t * Iterate over all elements of the matrix in row order and call handler (lambda closure).\n\t */\n\ttemplate<typename L> void forElementsInRowOrder(L handle) const;\n\n\t/**\n\t * Iterate in parallel over all rows and call handler (lambda closure) on all elements of the matrix.\n\t */\n\ttemplate<typename L> void parallelForElementsInRowOrder(L handle) const;\n\n\t/**\n\t * Iterate in parallel over all rows and call handler (lambda closure) on all elements of the matrix.\n\t */\n\ttemplate<typename L> void parallelForElementsInRowOrder(L handle);\n};\n\ntemplate<typename L> inline DenseMatrix NetworKit::DenseMatrix::binaryOperator(const DenseMatrix &A, const DenseMatrix &B, L
binaryOp) {\n\tassert(A.nRows == B.nRows && A.nCols == B.nCols);\n\n\tstd::vector<double> resultEntries(A.numberOfRows() * A.numberOfColumns());\n\n#pragma omp parallel for\n\tfor (index i = 0; i < A.numberOfRows(); ++i) {\n\t\tindex offset = i * A.numberOfColumns();\n\t\tfor (index j = offset; j < offset + A.numberOfColumns(); ++j) {\n\t\t\tresultEntries[j] = binaryOp(A.entries[j], B.entries[j]);\n\t\t}\n\t}\n\n\treturn DenseMatrix(A.numberOfRows(), A.numberOfColumns(), resultEntries);\n}\n\ntemplate<typename L>\ninline void NetworKit::DenseMatrix::forElementsInRow(index i, L handle) const {\n\tindex offset = i * numberOfColumns();\n\tfor (index k = offset, j = 0; k < offset + numberOfColumns(); ++k, ++j) {\n\t\thandle(j, entries[k]);\n\t}\n}\n\ntemplate<typename L>\ninline void NetworKit::DenseMatrix::parallelForElementsInRow(index i, L handle) const {\n\tindex offset = i * numberOfColumns();\n#pragma omp parallel for\n\tfor (index j = 0; j < numberOfColumns(); ++j) {\n\t\thandle(j, entries[offset + j]);\n\t}\n}\n\ntemplate<typename L>\ninline void NetworKit::DenseMatrix::forElementsInRowOrder(L handle) const {\n\tfor (index i = 0; i < nRows; ++i) {\n\t\tindex offset = i * numberOfColumns();\n\t\tfor (index k = offset, j = 0; k < offset + numberOfColumns(); ++k, ++j) {\n\t\t\thandle(i, j, entries[k]);\n\t\t}\n\t}\n}\n\ntemplate<typename L>\ninline void NetworKit::DenseMatrix::parallelForElementsInRowOrder(L handle) const {\n#pragma omp parallel for\n\tfor (index i = 0; i < nRows; ++i) {\n\t\tindex offset = i * numberOfColumns();\n\t\tfor (index k = offset, j = 0; k < offset + numberOfColumns(); ++k, ++j) {\n\t\t\thandle(i, j, entries[k]);\n\t\t}\n\t}\n}\n\ntemplate<typename L>\ninline void NetworKit::DenseMatrix::parallelForElementsInRowOrder(L handle) {\n#pragma omp parallel for\n\tfor (index i = 0; i < nRows; ++i) {\n\t\tindex offset = i * numberOfColumns();\n\t\tfor (index k = offset, j = 0; k < offset + numberOfColumns(); ++k, ++j) {\n\t\t\thandle(i, j, entries[k]);\n\t\t}\n\t}\n}\n\n} /* namespace NetworKit */\n\n#endif /* NETWORKIT_CPP_ALGEBRAIC_DENSEMATRIX_H_ */\n" }, { "alpha_fraction": 0.6928446888923645, "alphanum_fraction": 0.7102966904640198, "avg_line_length": 16.363636016845703, "blob_id": "2accb48910810b75cea39701d1f87a3994912f39", "content_id": "77273f4bd25b89db8b4b90ce93503da4c4836952", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 573, "license_type": "permissive", "max_line_length": 53, "num_lines": 33, "path": "/networkit/cpp/sparsification/test/SparsificationBenchmark.h", "repo_name": "networkitproject/networkit-mirror", "src_encoding": "UTF-8", "text": "/*\n * SparsificationBenchmark.h\n *\n * Created on: 31.07.2014\n * Author: Gerd Lindner\n */\n\n#ifndef NOGTEST\n\n#ifndef SparsificationBENCHMARK_H_\n#define SparsificationBENCHMARK_H_\n\n#include <gtest/gtest.h>\n\n#include \"../../graph/Graph.h\"\n#include \"../../auxiliary/Timer.h\"\n\n\nnamespace NetworKit {\n\nclass SparsificationBenchmark: public testing::Test {\nprotected:\n\tint64_t n;\npublic:\n\tSparsificationBenchmark();\n\tvirtual ~SparsificationBenchmark();\n\tGraph makeCompleteGraph(count n);\n};\n\n} /* namespace NetworKit */\n#endif /* SparsificationBENCHMARK_H_ */\n\n#endif /*NOGTEST */\n" }, { "alpha_fraction": 0.5549569129943848, "alphanum_fraction": 0.560775876045227, "avg_line_length": 22.553298950195312, "blob_id": "604486c723bb42ee1b7a602e9b8d61a772b586eb", "content_id": "1f9651d1660995f0dc17532b0ee3639cc63cee3f", 
"detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 4640, "license_type": "permissive", "max_line_length": 138, "num_lines": 197, "path": "/networkit/cpp/scd/GCE.cpp", "repo_name": "networkitproject/networkit-mirror", "src_encoding": "UTF-8", "text": "/* GCE.cpp\n *\n * Created on: 06.05.2013\n * Author: cls\n */\n\n\n#include \"GCE.h\"\n\n\nnamespace NetworKit {\n\n\nGCE::GCE(const Graph& G, std::string objective) : SelectiveCommunityDetector(G), objective(objective), intersector(G.upperNodeIdBound()) {\n\n}\n\nstd::map<node, std::set<node> > GCE::run(std::set<unsigned int>& seeds) {\n std::map<node, std::set<node> > result;\n for (auto seed : seeds) {\n auto community = expandSeed(seed);\n result[seed] = community;\n }\n return result;\n}\n\nstd::set<node> GCE::expandSeed(node s) {\n /**\n * Check if set contains node.\n */\n\tauto in = [](const std::set<node>& A, node x) {\n\t\treturn (A.find(x) != A.end());\n\t};\n\n\tstd::set<node> community;\n\n\t// values per community\n\tcount intEdges = 0;\n count extEdges = 0;\n\n std::set<node> currentShell;\n G.forNeighborsOf(s, [&](node u) {\n \tcurrentShell.insert(u);\n });\n\n\n double currentQ = 0.0; // current community quality\n\n // values per node\n count degInt, degExt; // internal, external degree\n\n\n auto boundary = [&](const std::set<node>& C) {\n\t\tstd::set<node> sh;\n\t\tfor (node v : currentShell) {\n\t\t\tG.forNeighborsOf(v, [&](node u){\n\t\t\t\tif (!in(C, u)) {\n\t\t\t\t\tsh.insert(v);\n\t\t\t\t}\n\t\t\t});\n\t\t}\n\t\treturn sh;\n\t};\n\n\n\t/** @return the shell of the given community */\n\t/*auto shell = [&](const std::set<node>& C) {\n\t\tstd::set<node> sh;\n\t\tfor (node v : C) {\n\t\t\tG.forNeighborsOf(v, [&](node u){\n\t\t\t\tif (!in(C, u)) {\n\t\t\t\t\tsh.insert(u);\n\t\t\t\t}\n\t\t\t});\n\t\t}\n\t\treturn sh;\n\t};*/\n\n\t/**\n\t * internal and external degree of a node with respect to the community\n\t */\n\tauto intExtDeg = [&](node v, const std::set<node>& C) {\n\t\tcount degInt = 0;\n\t\tcount degExt = 0;\n\t\tG.forNeighborsOf(v, [&](node u) {\n\t\t\tif (in(C, u)) {\n\t\t\t\tdegInt += 1;\n\t\t\t} else {\n\t\t\t\tdegExt += 1;\n\t\t\t}\n\t\t});\n\t\treturn std::make_pair(degInt, degExt);\n\t};\n\n\n auto intExtEdges = [&](const std::set<node>& community) {\n count internal = 0;\n count external = 0;\n for (node u : community) {\n G.forEdgesOf(u, [&](node u, node v) {\n if (in(community, v)) {\n internal += 1;\n } else {\n external += 1;\n }\n });\n }\n internal = internal / 2; // internal edges were counted twice\n return std::make_pair(internal, external);\n };\n\n\n /*\n * objective function M\n * @return quality difference for the move of v to C\n */\n\tauto deltaM = [&](node v, const std::set<node>& C){\n\t\tdouble delta = (intEdges + degInt) / (double) (extEdges - degInt + degExt);\n\t\treturn delta - currentQ;\n\t};\n\n\n /*\n * objective function L\n * @return quality difference for the move of v to C\n */\n auto deltaL = [&](node v, std::set<node>& C){\n \tC.insert(v);\n \tdouble numerator = 2.0 * (intEdges + degInt) * boundary(C).size();\n \tdouble denominator = C.size() * (extEdges - degInt + degExt);\n \tC.erase(v);\n return (numerator / denominator) - currentQ;\n };\n\n\n\n /*auto acceptability = [&](node v, std::set<node>& C){\n double intersectSize ;\n double unionSize;\n return intersectSize / unionSize;\n };*/\n\n\n std::function<double(node v, std::set<node>& C)> deltaQ;\n // select quality objective\n if (objective == \"M\") {\n deltaQ = 
deltaM;\n } else if (objective == \"L\") {\n deltaQ = deltaL;\n } else {\n throw std::runtime_error(\"unknown objective function\");\n }\n\n\n // insert seed\n community.insert(s);\n\n // for M, quality of {s} is 0.0\n\n\tdouble dQMax;\n\tnode vMax;\n\tdo {\n // get values for current community\n std::tie(intEdges, extEdges) = intExtEdges(community);\n // scan shell for node with maximum quality improvement\n\t\tdQMax = 0.0; \t// maximum quality improvement\n\t\tvMax = none;\n//\t\tfor (node v : shell(community)) {\n\t\tfor (node v : currentShell) {\n // get values for current node\n std::tie(degInt, degExt) = intExtDeg(v, community);\n\t\t\tdouble dQ = deltaQ(v, community);\n\t\t\tTRACE(\"dQ: \", dQ);\n\t\t\tif (dQ >= dQMax) {\n\t\t\t\tvMax = v;\n\t\t\t\tdQMax = dQ;\n\t\t\t}\n\t\t}\n\t\tTRACE(\"vMax: \", vMax);\n\t\tTRACE(\"dQMax: \", dQMax);\n\t\tif (vMax != none) {\n\t\t\tcommunity.insert(vMax); \t// add best node to community\n\t\t\tcurrentShell.erase(vMax);\t// remove best node from shell\n\t\t\tG.forNeighborsOf(vMax, [&](node v) { // insert external neighbors of vMax into shell\n\t\t\t\tif (!in(community, v)) {\n\t\t\t\t\tcurrentShell.insert(v);\n\t\t\t\t}\n\t\t\t});\n currentQ += dQMax; // update current community quality\n\t\t\tTRACE(\"community: \", community);\n\t\t}\n\t} while (vMax != none);\n\n\treturn community;\n}\n\n} /* namespace NetworKit */\n" }, { "alpha_fraction": 0.6105149984359741, "alphanum_fraction": 0.6689913868904114, "avg_line_length": 20.170454025268555, "blob_id": "8069de9b7e6681d769f240f21a2b769d4e911790", "content_id": "a5fa97418f6cdd7e9984e2971b7c4dad1dd33e6f", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1864, "license_type": "permissive", "max_line_length": 96, "num_lines": 88, "path": "/networkit/cpp/algebraic/test/AdjacencyMatrixGTest.cpp", "repo_name": "networkitproject/networkit-mirror", "src_encoding": "UTF-8", "text": "/*\n * AdjacencyMatrixGTest.cpp\n *\n * Created on: 02.04.2014\n * Author: Michael\n */\n\n#include \"AdjacencyMatrixGTest.h\"\n\nnamespace NetworKit {\n\nAdjacencyMatrixGTest::AdjacencyMatrixGTest() {\n}\n\nAdjacencyMatrixGTest::~AdjacencyMatrixGTest() {\n}\n\nTEST(AdjacencyMatrixGTest, testSmallAdjacencyMatrix) {\n\tGraph graph(6);\n\tgraph.addEdge(0,0);\n\tgraph.addEdge(0,1);\n\tgraph.addEdge(0,4);\n\tgraph.addEdge(1,2);\n\tgraph.addEdge(1,4);\n\tgraph.addEdge(2,3);\n\tgraph.addEdge(3,4);\n\tgraph.addEdge(3,5);\n\n\tAdjacencyMatrix mat(graph);\n\n\t// first row\n\tEXPECT_EQ(1, mat(0,0));\n\tEXPECT_EQ(1, mat(0,1));\n\tEXPECT_EQ(0, mat(0,2));\n\tEXPECT_EQ(0, mat(0,3));\n\tEXPECT_EQ(1, mat(0,4));\n\tEXPECT_EQ(0, mat(0,5));\n\n\t// third row\n\tEXPECT_EQ(0, mat(2,0));\n\tEXPECT_EQ(1, mat(2,1));\n\tEXPECT_EQ(0, mat(2,2));\n\tEXPECT_EQ(1, mat(2,3));\n\tEXPECT_EQ(0, mat(2,4));\n\tEXPECT_EQ(0, mat(2,5));\n\n\t// fifth row\n\tEXPECT_EQ(1, mat(4,0));\n\tEXPECT_EQ(1, mat(4,1));\n\tEXPECT_EQ(0, mat(4,2));\n\tEXPECT_EQ(1, mat(4,3));\n\tEXPECT_EQ(0, mat(4,4));\n\tEXPECT_EQ(0, mat(4,5));\n\n\n\t// directed, weighted graph\n\tGraph dGraph(4, true, true);\n\tdGraph.addEdge(0,1,2);\n\tdGraph.addEdge(0,0, 42);\n\tdGraph.addEdge(2,3,-3);\n\tdGraph.addEdge(3,2,5);\n\n\tmat = AdjacencyMatrix(dGraph);\n\tASSERT_EQ(dGraph.numberOfNodes(), mat.numberOfRows());\n\tASSERT_EQ(dGraph.numberOfNodes(), mat.numberOfColumns());\n\n\tEXPECT_EQ(2, mat(0,1));\n\tEXPECT_EQ(0, mat(1,0));\n\tEXPECT_EQ(42, mat(0,0));\n\tEXPECT_EQ(-3, mat(2,3));\n\tEXPECT_EQ(5, mat(3,2));\n}\n\nTEST(AdjacencyMatrixGTest, 
testAdjacencyMatrixOfLesmisGraph) {\n\t// read lesmis graph\n\tMETISGraphReader graphReader;\n\tGraph graph = graphReader.read(\"input/lesmis.graph\");\n\n\t// create AdjacencyMatrix\n\tAdjacencyMatrix mat(graph);\n\n\tmat.forNonZeroElementsInRowOrder([&](const index row, const index column, const double value) {\n\t\tEXPECT_EQ(graph.weight(row, column), value);\n\t});\n}\n\n\n} /* namespace NetworKit */\n\n" }, { "alpha_fraction": 0.6193712949752808, "alphanum_fraction": 0.6779949069023132, "avg_line_length": 24.69869041442871, "blob_id": "db5460d3c8909c927e4b2f6370682e115bd13a2c", "content_id": "86c38524dd7378233c0452f4f3f26446c06f084d", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 5885, "license_type": "permissive", "max_line_length": 88, "num_lines": 229, "path": "/networkit/cpp/sparsification/test/SimmelianBackboneGTest.cpp", "repo_name": "networkitproject/networkit-mirror", "src_encoding": "UTF-8", "text": "/*\n * SimmelianBackboneGTest.cpp\n *\n * Created on: 31.05.2014\n * Author: Gerd Lindner\n */\n\n#ifndef NOGTEST\n\n#include \"SimmelianBackboneGTest.h\"\n\n#include \"../Sparsifiers.h\"\n#include \"../SimmelianOverlapScore.h\"\n#include \"../../edgescores/ChibaNishizekiTriangleEdgeScore.h\"\n\nnamespace NetworKit {\n\nTEST_F(SimmelianBackboneGTest, testOverlapCounting) {\n\t//Build up a ranked neighborhood graph. Notation: Ego/Alter/Simmeliannes/Rank\n\tstd::vector<RankedNeighbors> neighbors(2);\n\tneighbors[0].push_back(RankedEdge(0,1,3,0));\n\tneighbors[0].push_back(RankedEdge(0,2,2,1));\n\tneighbors[0].push_back(RankedEdge(0,5,2,1));\n\tneighbors[0].push_back(RankedEdge(0,3,1,3));\n\tneighbors[0].push_back(RankedEdge(0,6,1,3));\n\n\tneighbors[1].push_back(RankedEdge(1,0,3,0));\n\tneighbors[1].push_back(RankedEdge(1,2,2,1));\n\tneighbors[1].push_back(RankedEdge(1,4,2,1));\n\tneighbors[1].push_back(RankedEdge(1,3,1,3));\n\n\tGraph G(0);\n\tG.indexEdges();\n\n\tSimmelianOverlapScore simmel(G, std::vector<count>(), 0);\n\tsimmel.run();\n\tRedundancy r (0, 0.0);\n\n\tr = simmel.getOverlap(0, 1, neighbors, 0);\n\tEXPECT_EQ(0, r.overlap) << \"wrong overlap\";\n\tEXPECT_DOUBLE_EQ(0.0, r.jaccard) << \"wrong jaccard index\";\n\n\tr = simmel.getOverlap(1, 0, neighbors, 0);\n\tEXPECT_EQ(0, r.overlap) << \"wrong overlap\";\n\tEXPECT_DOUBLE_EQ(0.0, r.jaccard) << \"wrong jaccard index\";\n\n\tr = simmel.getOverlap(0, 1, neighbors, 1);\n\tEXPECT_EQ(1, r.overlap) << \"wrong overlap\";\n\tEXPECT_DOUBLE_EQ((1.0/3.0), r.jaccard) << \"wrong jaccard index\";\n\n\tr = simmel.getOverlap(0, 1, neighbors, 2);\n\tEXPECT_EQ(1, r.overlap) << \"wrong overlap\";\n\tEXPECT_DOUBLE_EQ((1.0/3.0), r.jaccard) << \"wrong jaccard index\";\n\n\tr = simmel.getOverlap(0, 1, neighbors, 3);\n\tEXPECT_EQ(2, r.overlap) << \"wrong overlap\";\n\tEXPECT_DOUBLE_EQ((2.0/5.0), r.jaccard) << \"wrong jaccard index\";\n}\n\nTEST_F(SimmelianBackboneGTest, testRankedNeighborhood) {\n\tGraph g(10);\n\n\tg.addEdge(4,5);\n\tg.addEdge(4,6);\n\tg.addEdge(5,6);\n\n\tg.addEdge(4,8);\n\tg.addEdge(4,7);\n\tg.addEdge(7,8);\n\n\tg.addEdge(4,9);\n\tg.addEdge(8,9);\n\tg.indexEdges();\n\n\t//Apply triangle counting algorithm\n\tChibaNishizekiTriangleEdgeScore counter(g);\n\tcounter.run();\n\tstd::vector<count> triangles = counter.scores();\n\n\t//Actual test: ranked neighborhood\n\tSimmelianOverlapScore simmel(g, triangles, 0);\n\tsimmel.run();\n\tstd::vector<RankedNeighbors> neighborhood = simmel.getRankedNeighborhood(g, triangles);\n\n\t//Neighborhood of 4\n\tEXPECT_EQ(5, 
neighborhood[4].size());\n\tEXPECT_EQ(RankedEdge(4, 8, 2, 0), neighborhood[4][0]);\n\tEXPECT_EQ(RankedEdge(4, 9, 1, 1), neighborhood[4][1]);\n\tEXPECT_EQ(RankedEdge(4, 7, 1, 1), neighborhood[4][2]);\n\tEXPECT_EQ(RankedEdge(4, 6, 1, 1), neighborhood[4][3]);\n\tEXPECT_EQ(RankedEdge(4, 5, 1, 1), neighborhood[4][4]);\n\n\t//Neighborhood of 8\n\tEXPECT_EQ(3, neighborhood[8].size());\n\tEXPECT_EQ(RankedEdge(8, 4, 2, 0), neighborhood[8][0]);\n\tEXPECT_EQ(RankedEdge(8, 9, 1, 1), neighborhood[8][1]);\n\tEXPECT_EQ(RankedEdge(8, 7, 1, 1), neighborhood[8][2]);\n}\n\nTEST_F(SimmelianBackboneGTest, testRankedNeighborhoodSkippedRanks) {\n\tGraph g(7);\n\n\tg.addEdge(0,1);\n\tg.addEdge(0,2);\n\tg.addEdge(0,3);\n\tg.addEdge(0,4);\n\tg.addEdge(0,5);\n\tg.addEdge(0,6);\n\n\tg.addEdge(1,2);\n\tg.addEdge(2,3);\n\tg.addEdge(3,4);\n\tg.addEdge(4,5);\n\tg.addEdge(4,6);\n\tg.indexEdges();\n\n\t//Apply triangle counting algorithm\n\tChibaNishizekiTriangleEdgeScore counter(g);\n\tcounter.run();\n\tstd::vector<count> triangles = counter.scores();\n\n\t//Actual test: ranked neighborhood\n\tSimmelianOverlapScore simmel(g, triangles, 0);\n\tsimmel.run();\n\tstd::vector<RankedNeighbors> neighborhood = simmel.getRankedNeighborhood(g, triangles);\n\n\t//Neighborhood of 0\n\tEXPECT_EQ(6, neighborhood[0].size());\n\tEXPECT_EQ(RankedEdge(0, 4, 3, 0), neighborhood[0][0]);\n\tEXPECT_EQ(RankedEdge(0, 3, 2, 1), neighborhood[0][1]);\n\tEXPECT_EQ(RankedEdge(0, 2, 2, 1), neighborhood[0][2]);\n\tEXPECT_EQ(RankedEdge(0, 6, 1, 3), neighborhood[0][3]);\n\tEXPECT_EQ(RankedEdge(0, 5, 1, 3), neighborhood[0][4]);\n\tEXPECT_EQ(RankedEdge(0, 1, 1, 3), neighborhood[0][5]);\n\n\t//Neighborhood of 4\n\tEXPECT_EQ(4, neighborhood[4].size());\n\tEXPECT_EQ(RankedEdge(4, 0, 3, 0), neighborhood[4][0]);\n\tEXPECT_EQ(RankedEdge(4, 6, 1, 1), neighborhood[4][1]);\n\tEXPECT_EQ(RankedEdge(4, 5, 1, 1), neighborhood[4][2]);\n\tEXPECT_EQ(RankedEdge(4, 3, 1, 1), neighborhood[4][3]);\n\n}\n\nTEST_F(SimmelianBackboneGTest, testOverlapFiltering) {\n\tGraph g(10);\n\n\tg.addEdge(0,1);\n\tg.addEdge(1,2);\n\tg.addEdge(2,3);\n\tg.addEdge(3,4);\n\tg.addEdge(4,0);\n\n\tg.addEdge(0,2);\n\tg.addEdge(1,3);\n\tg.addEdge(2,4);\n\tg.addEdge(3,0);\n\tg.addEdge(4,1);\n\n\tg.addEdge(5,6);\n\tg.addEdge(6,7);\n\tg.addEdge(7,8);\n\tg.addEdge(8,9);\n\tg.addEdge(9,5);\n\n\tg.addEdge(5,7);\n\tg.addEdge(6,8);\n\tg.addEdge(7,9);\n\tg.addEdge(8,5);\n\tg.addEdge(9,6);\n\n\tg.addEdge(0,6);\n\tg.addEdge(0,5);\n\tg.addEdge(5,1);\n\tg.indexEdges();\n\n\tSimmelianSparsifierParametric simmel(g, 2, 1);\n\tsimmel.run();\n\tGraph b = simmel.getGraph();\n\n\tEXPECT_EQ(20, b.numberOfEdges());\n\n\tEXPECT_FALSE(b.hasEdge(0,6));\n\tEXPECT_FALSE(b.hasEdge(0,5));\n\tEXPECT_FALSE(b.hasEdge(5,1));\n\n\tEXPECT_TRUE(b.hasEdge(0,1));\n\tEXPECT_TRUE(b.hasEdge(1,2));\n\tEXPECT_TRUE(b.hasEdge(2,3));\n\tEXPECT_TRUE(b.hasEdge(3,4));\n\tEXPECT_TRUE(b.hasEdge(4,0));\n}\n\nTEST_F(SimmelianBackboneGTest, testBackboneTrivial) {\n\tGraph g(5);\n\n\tg.addEdge(0,1);\n\tg.addEdge(0,2);\n\tg.addEdge(1,2);\n\tg.indexEdges();\n\n\t//Parametric\n\tSimmelianSparsifierParametric simmel(g, 1, 0);\n\tsimmel.run();\n\tGraph b = simmel.getGraph();\n\tEXPECT_EQ(3, b.numberOfEdges()) << \"wrong edge count in backbone\";\n\tEXPECT_EQ(5, b.numberOfNodes()) << \"wrong node count in backbone\";\n}\n\nTEST_F(SimmelianBackboneGTest, testBackboneConnectedGraph) {\n\tGraph g(25);\n\tg.forNodePairs([&](node u, node v){\n\t\tg.addEdge(u,v);\n\t});\n\tg.shrinkToFit();\n\n\tg.indexEdges();\n\n\t//Parametric\n\tSimmelianSparsifierParametric 
simmel(g, 25, 5);\n\tsimmel.run();\n\tGraph b = simmel.getGraph();\n\tEXPECT_EQ(300, b.numberOfEdges()) << \"wrong edge count in backbone\";\n}\n\n}\n/* namespace NetworKit */\n\n#endif /*NOGTEST */\n" }, { "alpha_fraction": 0.7270599007606506, "alphanum_fraction": 0.7398232817649841, "avg_line_length": 40.24922180175781, "blob_id": "77b9e98e2371171f8bd98cc067ca8f2a83aa8dfe", "content_id": "d454c08a4aceae54fd966e755f6d47ce90627ad8", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 13241, "license_type": "permissive", "max_line_length": 406, "num_lines": 321, "path": "/Doc/doc/get_started.rst", "repo_name": "networkitproject/networkit-mirror", "src_encoding": "UTF-8", "text": ".. |separator| raw:: html\n\n\t<div style=\"padding-top: 25px; border-bottom: 1px solid #d4d7d9;\"></div>\n\n\n.. _get_started:\n\n===========\nGet Started\n===========\n\nWe support three ways to install NetworKit:\n\n- `NetworKit Virtual Machine`_: Download and try NetworKit preinstalled on a virtual machine. This is strongly recommended for users using Microsoft Windows.\n\n- `Pip install`_: Download the NetworKit Python package with pip. This is the easier way to get NetworKit but you can only use NetworKit via Python this way.\n\n- `Build NetworKit from Source`_: Clone or download the source code of NetworKit and build the C++ and Python modules from source.\n\n\n\nWith NetworKit as a Python extension module, you get access to native high-performance code and can at the same time work interactively in the Python ecosystem.\nAlthough the standard Python interpreter works fine, we recommend `IPython <http://ipython.readthedocs.org/en/stable/>`_ as a great environment for scientific\nworkflows. View the `IPython Quickstart Guide`_ for installation instructions and how to use NetworKit with IPython.\n\n\nOnce you have installed NetworKit, please make sure to check out our\n`NetworKit UserGuide <http://nbviewer.ipython.org/urls/networkit.iti.kit.edu/uploads/docs/NetworKit_UserGuide.ipynb>`_ for an overview of the features provided\nin NetworKit.\n\n|separator|\n\n.. _NetworKit Virtual Machine:\n\nInstall the NetworKit Virtual Machine\n=====================================\n\nIf you want a quick and easy way to try NetworKit for your purposes or you use a Microsoft Windows operating system, we strongly recommend the installation of our\nNetworKit virtual machine that can be downloaded `here <https://networkit.iti.kit.edu/uploads/networkit-vm.zip>`_.\n\nTake a look at our `installation guide <https://networkit.iti.kit.edu/networkit-vm_guide.html>`_ for further instructions on installing the virtual machine on your system.\n\n\n\n|separator|\n\n\n.. _Pip install:\n\nInstall NetworKit via Pip\n=========================\n\n.. _Python Requirements:\n\nRequirements\n~~~~~~~~~~~~\n\nYou will need the following software to install NetworKit as a python package:\n\n- A modern C++ compiler, e.g.: `g++ <https://gcc.gnu.org>`_ (>= 4.8) or `clang++ <http://clang.llvm.org>`_ (>= 3.7)\n- Python 3 (>= 3.4 is recommended, 3.3 supported)\n- `Pip <https://pypi.python.org/pypi/pip>`_\n- `SCons <http://scons.org>`_: Please note that SCons is only available for Python 2. For installation via pip, we have a script that builds the C++ part of NetworKit,\tso you can try it without SCons.\n- `Cython <http://cython.org/>`_ (>= 0.21): Only needed by developers.\n\nNetworKit uses some additional external Python packages. 
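A quick way to check which of these optional packages are already available is a short snippet like the following (a minimal sketch, not part of the official setup; the package names mirror the list below):\n\n.. code-block:: python\n\n\timport importlib\n\n\tfor pkg in [\"scipy\", \"numpy\", \"readline\", \"matplotlib\", \"networkx\", \"tabulate\"]:\n\t\ttry:\n\t\t\timportlib.import_module(pkg)\n\t\t\tprint(pkg, \"is available\")\n\t\texcept ImportError:\n\t\t\tprint(pkg, \"is missing\")\n\n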
While you do not need them to run NetworKit, it is strongly recommended to install them in order to use all\nthe features of NetworKit:\n\n- scipy\n- numpy\n- readline\n- matplotlib\n- networkx\n- tabulate\n\nYou can use the command :code:`pip3 install scipy numpy readline matplotlib networkx tabulate` on your terminal to install all packages at once. During the installation of\nNetworKit, the setup will check if the external packages NetworKit uses are available and print warnings at the end of the installation process. If you do not see any\nwarnings, your system should be ready to use NetworKit.\n\n\nInstall NetworKit\n~~~~~~~~~~~~~~~~~\n\nRun :code:`[sudo] pip[3] install [--user] networkit` from your command line to install the Python package *networkit*.\n\nYou can remove NetworKit completely by using the command :code:`[sudo] pip[3] uninstall networkit`.\n\nTo check that everything works as expected, open a Python terminal and run the following lines:\n\n.. code-block:: python\n\n    import networkit\n    G = networkit.Graph(5)\n    G.addEdge(0,1)\n    G.toString()\n\n|separator|\n\n.. _Build NetworKit from Source:\n\nBuild NetworKit from Source\n===========================\n\nYou can clone NetworKit from `AlgoHub <http://algohub.iti.kit.edu/parco/NetworKit/NetworKit/>`_ with Mercurial or download the source code as a\n`Zip file <https://networkit.iti.kit.edu/uploads/NetworKit.zip>`_.\n\nRequirements\n~~~~~~~~~~~~\n\nYou will need the following software to install NetworKit as a Python package:\n\n- A modern C++ compiler, e.g.: `g++ <https://gcc.gnu.org>`_ (>= 4.8) or `clang++ <http://clang.llvm.org>`_ (>= 3.7)\n- `SCons <http://scons.org>`_: Please note that SCons is only available for Python 2. For the different build targets, SCons is mandatory.\n- `Google Test <https://github.com/google/googletest>`_ (only needed if you want to build the unit tests, which is recommended)\n\nBuilding NetworKit\n~~~~~~~~~~~~~~~~~~\n\nThis section describes how to build NetworKit including the Python functionality. If you do not wish to install NetworKit as a Python package, please refer\nto `Building Only the C++ Core`_.\n\nFor building NetworKit including the Python functionality, make sure to also install the software from the `Python Requirements`_ listed in the `Pip install`_.\n\nAfter all requirements are installed, switch to the top folder of NetworKit and run the script *setup.py* with the following options:\n\n.. code-block:: bash\n\n\tpython3 setup.py build_ext --inplace [--optimize=V] [-jX]\n\nThe script will call SCons to compile NetworKit as a library and then build the extensions in the folder *src/python*. By default, NetworKit will be built with\nthe number of available cores in optimized mode. It is possible to add the options :code:`--optimize=V` and :code:`-jX` in the same way as for a manual\nSCons call, to specify the optimization level and the number of threads used for compilation. The setup script provides more functionality and can be used with\npip as well:\n\n.. code-block:: bash\n\n\tpip3 install -e ./\n\nwill compile NetworKit, build the extensions and on top of that temporarily install NetworKit so that it is available on the whole system. This can be undone by\ncalling :code:`pip3 uninstall networkit`.\n\n.. 
code-block:: bash\n\n\tpython3 setup.py clean [--optimize=V]\n\nwill remove the extensions and their build folder as well as call SCons to remove the NetworKit library and its build folder specified by :code:`--optimize=V`.\n\nNote: All of the above installation commands may require root privileges depending on your system, so adjust them accordingly. If you do not have root privileges,\nadd :code:`--user` to your command.\n\n\nBuilding Only the C++ Core\n~~~~~~~~~~~~~~~~~~~~~~~~~~\n\nIn case you do not need NetworKit's Python functionality, this section describes how to build the C++ parts only.\n\nWe recommend SCons for building the C++ part of NetworKit. Individual settings for your environment will be read from a configuration file. As an example, the\nfile *build.conf.example* is provided. Copy this to *build.conf* and edit your environment settings. Then call SCons.\n\nThe call to SCons has the following options:\n\n.. code-block:: bash\n\n\tscons --optimize=<level> --target=<target>\n\nwhere :code:`<level>` can be\n\n- :code:`Dbg` debug\n- :code:`Opt` optimized\n- :code:`Pro` profiling\n\nand :code:`<target>` can be\n\n- :code:`Core` build NetworKit as a library, required for the Python extension through Cython.\n- :code:`Tests` build executable for the unit tests (requires GoogleTest).\n- :code:`Lib` build NetworKit as a library and create symbolic links.\n\nFor example, to build NetworKit as an optimized library, run\n\n.. code-block:: bash\n\n\tscons --optimize=Opt --target=Lib\n\nTo speed up the compilation on a multicore machine, you can append :code:`-jX` where *X* denotes the number of threads to compile with.\n\nLogging is enabled by default. If you want to disable logging functionality, add the following to your scons call:\n\n.. code-block:: bash\n\n\t--logging=no\n\n\nUse NetworKit as a library\n~~~~~~~~~~~~~~~~~~~~~~~~~~\n\nIt is also possible to use NetworKit as a library. Therefore, choose the target `Lib` when compiling NetworKit. The include directives in your C++\\-application\nlook like the following:\n\n.. code-block:: C\n\n\t#include <NetworKit/graph/Graph.h>\n\nNetworKit in the directory `include` is a symlink to the directory `networkit/cpp`, so the directory structure from the repository is valid. To compile your\napplication, you need to add the paths for the header files and the location of the library. Note that it is possible to link the different builds\n(debug, profiling, optimized) of the library. There is a simple source file to demonstrate this. Feel free to compile `LibDemo.cpp` as follows:\n\n.. code-block:: bash\n\n\tg++ -o LibDemo -std=c++11 -I/path/to/repo/include -L/path/to/repo LibDemo.cpp -lNetworKit -fopenmp\n\n\nTest\n~~~~\n\nYou actually do not need to build and run our unit tests. However, if you experience any issues with NetworKit, you might want to check whether NetworKit runs properly.\nPlease refer to the `Unit Tests and Testing <https://networkit.iti.kit.edu/api/DevGuide.html#devguide-unittests>`_ section in our `NetworKit Development Guide <https://networkit.iti.kit.edu/api/DevGuide.html#devGuide>`_.\n\n\nKnown Issues\n~~~~~~~~~~~~\n\n- Mac OS X 10.10 \"Yosemite\": Some users have reported compilation problems on Yosemite with g++ 4.9. The compiler errors mention register problems.\n While the exact reason remains unclear, the actual issue seems to be that the compiler tries to perform a dual architecture build.\n Fix: Enforce a 64-bit build by prepending :code:`ARCHFLAGS=\"-arch x86_64\"` to your setup/pip command, e.g. 
as in\n :code:`sudo ARCHFLAGS=\"-arch x86_64\" python3 setup.py build_ext --inplace -j4` or :code:`sudo ARCHFLAGS=\"-arch x86_64\" pip3 install networkit`.\n\n-\tNetworKit has not yet been successfully built on **Windows**. This is partially due to the fact that Windows ships without a C++ compiler, which is\n\tnecessary to build the Python extensions. Even with the Visual C++ Redistributable our attempts were not successful. Any help is appreciated. It may\n\tbe possible to build NetworKit as a library on Windows in environments like MinGW or Cygwin.\n\n\nContributions\n~~~~~~~~~~~~~\n\nWe would like to encourage contributions to the NetworKit source code. See the `NetworKit Development Guide <https://networkit.iti.kit.edu/api/DevGuide.html#devGuide>`_ for instructions. For support\nplease contact the `mailing list <https://lists.ira.uni-karlsruhe.de/mailman/listinfo/networkit>`_.\n\n\n|separator|\n\n\n.. _IPython Quickstart Guide:\n\nUse NetworKit with IPython\n==========================\n\nFirst make sure you have installed IPython, e.g. via pip: :code:`pip3 install ipython`.\n\nIPython Terminal\n~~~~~~~~~~~~~~~~\n\nIf you want to use NetworKit in the IPython terminal, type the following commands in your OS terminal:\n\n.. code-block:: bash\n\n\tipython3\n\n.. code-block:: python\n\n\tfrom networkit import *\n\nThe first line opens the IPython terminal. The second line imports the *networkit* Python module. After that, you should be able to use NetworKit interactively.\nFor usage examples, refer to the `NetworKit UserGuide <http://nbviewer.ipython.org/urls/networkit.iti.kit.edu/uploads/docs/NetworKit_UserGuide.ipynb>`_.\n\nIPython Notebook/jupyter\n~~~~~~~~~~~~~~~~~~~~~~~~\n\nAdditionally, we recommend that you familiarize yourself with NetworKit through experimenting with the interactive IPython Notebook `NetworKit_UserGuide.ipynb` located\nin the folder `Doc/Notebooks`. The user guide also introduces a large portion of NetworKit's functionality with usage examples. To display and work with these notebooks,\nyou have to install jupyter and start a local notebook server from the terminal with:\n\n.. code-block:: bash\n\n\tjupyter notebook\n\nIf you run into any problems with jupyter, head over to the `jupyter documentation <http://jupyter.readthedocs.io/en/latest/install.html>`_. If the notebook server starts as it is supposed to, your default browser should open a web interface; otherwise you have to open it manually. Then you can add `NetworKit_UserGuide.ipynb` from the above-mentioned location or browse to the location through the web interface.\n\nTo show plots within the notebooks, place the following two lines at the beginning of your notebook:\n\n.. code-block:: python\n\n\t%matplotlib inline\n\timport matplotlib.pyplot as plt\n\n**Note:** Instead of running jupyter, it may still be possible to run :code:`ipython3 notebook`. However, the notebook functionality of the ipython package is deprecated and has been moved to jupyter, which we strongly recommend.\n\nUsage Example\n~~~~~~~~~~~~~\n\nNow that you are done installing NetworKit, you might want to try the following example:\n\n.. 
code-block:: python\n\n\t>>> from networkit import *\n\t>>> g = generators.HyperbolicGenerator(1e5).generate()\n\t>>> overview(g)\n\tNetwork Properties for:\t\tG#5\n\tnodes, edges\t\t\t100000, 300036\n\tdirected?\t\t\tFalse\n\tweighted?\t\t\tFalse\n\tisolated nodes\t\t\t1815\n\tself-loops\t\t\t0\n\tdensity\t\t\t\t0.000060\n\tclustering coefficient\t\t0.720003\n\tmin/max/avg degree\t\t0, 1174, 6.000720\n\tdegree assortativity\t\t0.001383\n\tnumber of connected components\t4026\n\tsize of largest component\t78387 (78.39 %)\n\n\t>>> communities = community.detectCommunities(g, inspect=True)\n\tPLM(balanced,pc,turbo) detected communities in 0.14902853965759277 [s]\n\tsolution properties:\n\t------------------- -----------\n\t# communities 4253\n\tmin community size 1\n\tmax community size 1821\n\tavg. community size 23.5128\n\tmodularity 0.987991\n\t------------------- -----------\n\n\t>>>\n" }, { "alpha_fraction": 0.7789620161056519, "alphanum_fraction": 0.7826691269874573, "avg_line_length": 70.93333435058594, "blob_id": "dcbf7f6fd30430c16954f968cb4ca8946d32236a", "content_id": "06c823b7a23bf2ba8cec39ad0b370203e8ddc4bc", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 2158, "license_type": "permissive", "max_line_length": 727, "num_lines": 30, "path": "/Doc/doc/projects.rst", "repo_name": "networkitproject/networkit-mirror", "src_encoding": "UTF-8", "text": ".. |separator| raw:: html\n\n\t<div style=\"padding-top: 25px; border-bottom: 1px solid #d4d7d9;\"></div>\n\n========\nProjects\n========\n\nOn this page we present projects that use our NetworKit tool suite.\n\n\nImage Segmentation\n~~~~~~~~~~~~~~~~~~\n\nThere are several approaches to segmenting images into their main parts. One approach is to represent the image as a graph and apply graph clustering algorithms to compute a segmentation. This project is based on NetworKit and demonstrates how the framework can be used to segment images. A detailed project description with the basic idea behind the implemented algorithms can be found in this `pdf-file <https://networkit.iti.kit.edu/data/uploads/projects/networkit-imagesegmentation.pdf>`_. Furthermore, an interactive `iPython Notebook <http://nbviewer.ipython.org/urls/networkit.iti.kit.edu/data/uploads/projects/graph-based-segmentation.ipynb>`_ is also available.\n\nThe project can be found on `AlgoHub <https://algohub.iti.kit.edu/parco/NetworKit/NetworKit-ImageSegmentation>`_.\n\n\n|separator|\n\n\nProtein Interaction Networks\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\nIn his Master's thesis, Patrick Flick analyzed protein-protein interaction (PPI) networks of human cells. This work specifically looks at PPI networks of various cell and tissue types, here called tissue-specific PPIs (TSPPIs).\n\nThe goal of this work is to gain insights into the structure of interactions as well as into the properties of specific groups of proteins inside the TSPPI networks. To that end, an analysis pipeline was implemented and efficient analysis algorithms were developed, which operate on a sub-graph representation for TSPPI networks. The graph properties of TSPPI networks and properties of certain classes of proteins in the network were investigated. This work then re-evaluated prior research on a large set of TSPPIs, and demonstrated that some previous conclusions have to be reconsidered. 
Finally, NetworKit community-detection algorithms were employed in order to identify tissue-specific functional modules within TSPPIs.\n\nThe code, the thesis and more information is available on `github <https://github.com/r4d2/tsppi>`_.\n" }, { "alpha_fraction": 0.7426342964172363, "alphanum_fraction": 0.7495667338371277, "avg_line_length": 21.19230842590332, "blob_id": "9780cc8b5a511dce829b64fb85e03ad40ac40a81", "content_id": "08004b9500ab97273fca7401e5b7b015337e387e", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1154, "license_type": "permissive", "max_line_length": 101, "num_lines": 52, "path": "/networkit/cpp/numerics/LAMG/LevelHierarchy.h", "repo_name": "networkitproject/networkit-mirror", "src_encoding": "UTF-8", "text": "/*\n * LevelHierarchy.h\n *\n * Created on: 10.01.2015\n * Author: Michael\n */\n\n#ifndef LEVELHIERARCHY_H_\n#define LEVELHIERARCHY_H_\n\n#include \"Level/Level.h\"\n#include \"Level/LevelFinest.h\"\n#include \"Level/LevelElimination.h\"\n#include \"Level/LevelAggregation.h\"\n#include \"../../algebraic/DenseMatrix.h\"\n\nnamespace NetworKit {\n\n/**\n * @ingroup numerics\n */\nclass LevelHierarchy {\nprivate:\n\tstd::vector<LevelType> levelType;\n\tstd::vector<index> levelIndex;\n\tstd::vector<LevelElimination> eliminationLevels;\n\tstd::vector<LevelAggregation> aggregationLevels;\n\tLevelFinest finestLevel;\n\tDenseMatrix coarseLUMatrix;\n\n\tvoid createCoarseMatrix();\n\npublic:\n\tLevelHierarchy();\n\n\tvoid addFinestLevel(const CSRMatrix &A);\n\tvoid addEliminationLevel(const CSRMatrix &A, const std::vector<EliminationStage> &coarseningStages);\n\tvoid addAggregationLevel(const CSRMatrix &A, const CSRMatrix &P, const CSRMatrix &R);\n\tvoid setLastAsCoarsest();\n\tDenseMatrix& getCoarseMatrix();\n\n\tcount size() const;\n\tLevelType getType(index levelIdx) const;\n\tLevel& at(index levelIdx);\n\tdouble cycleIndex(index levelIdx);\n\n\n};\n\n} /* namespace NetworKit */\n\n#endif /* LEVELHIERARCHY_H_ */\n" }, { "alpha_fraction": 0.8529411554336548, "alphanum_fraction": 0.8529411554336548, "avg_line_length": 84, "blob_id": "fb2dba6319b5ac9c49a12417b170b416b9bd36d6", "content_id": "27148403ca9abf407a3ccffbbb297c5ce15bd726", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 170, "license_type": "permissive", "max_line_length": 149, "num_lines": 2, "path": "/networkit/graph.py", "repo_name": "networkitproject/networkit-mirror", "src_encoding": "UTF-8", "text": "# extension imports\nfrom _NetworKit import Graph, BFS, Dijkstra, DynBFS, DynDijkstra, SpanningForest, GraphTools, RandomMaximumSpanningForest, UnionMaximumSpanningForest\n" }, { "alpha_fraction": 0.6567103266716003, "alphanum_fraction": 0.7066285014152527, "avg_line_length": 25.55434799194336, "blob_id": "8ab9274bd22bda601b849b5d1298e70645c86b37", "content_id": "9a33d62d3ff149d8408eb73739568c61a9a1faba", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 2444, "license_type": "permissive", "max_line_length": 95, "num_lines": 92, "path": "/networkit/cpp/algebraic/test/LaplacianMatrixGTest.cpp", "repo_name": "networkitproject/networkit-mirror", "src_encoding": "UTF-8", "text": "/*\n * LaplacianMatrixGTest.cpp\n *\n * Created on: 25.03.2014\n * Author: Michael Wegner ([email protected])\n */\n\n#include \"LaplacianMatrixGTest.h\"\n\nnamespace NetworKit {\n\nLaplacianMatrixGTest::LaplacianMatrixGTest() 
{\n}\n\nLaplacianMatrixGTest::~LaplacianMatrixGTest() {\n}\n\nTEST(LaplacianMatrixGTest, testSmallLaplacianMatrix) {\n\tGraph graph(6);\n\tgraph.addEdge(0, 0); // self-loop\n\tgraph.addEdge(0, 1);\n\tgraph.addEdge(0, 4);\n\tgraph.addEdge(1, 4);\n\tgraph.addEdge(1, 2);\n\tgraph.addEdge(2, 3);\n\tgraph.addEdge(3, 4);\n\tgraph.addEdge(3, 5);\n\n\tLaplacianMatrix laplacianMatrix(graph);\n\tASSERT_EQ(graph.numberOfNodes(), laplacianMatrix.numberOfRows());\n\tASSERT_EQ(graph.numberOfNodes(), laplacianMatrix.numberOfColumns());\n\n\tEXPECT_EQ(2, laplacianMatrix(0,0));\n\tEXPECT_EQ(-1, laplacianMatrix(0,1));\n\tEXPECT_EQ(0, laplacianMatrix(0,2));\n\tEXPECT_EQ(0, laplacianMatrix(0,3));\n\tEXPECT_EQ(-1, laplacianMatrix(0,4));\n\tEXPECT_EQ(0, laplacianMatrix(0,5));\n\tEXPECT_EQ(3, laplacianMatrix(1,1));\n\tEXPECT_EQ(-1, laplacianMatrix(1,2));\n\tEXPECT_EQ(0, laplacianMatrix(1,3));\n\tEXPECT_EQ(-1, laplacianMatrix(1,4));\n\tEXPECT_EQ(0, laplacianMatrix(1,5));\n\tEXPECT_EQ(2, laplacianMatrix(2,2));\n\tEXPECT_EQ(-1, laplacianMatrix(2,3));\n\tEXPECT_EQ(0, laplacianMatrix(2,4));\n\tEXPECT_EQ(0, laplacianMatrix(2,5));\n\tEXPECT_EQ(3, laplacianMatrix(3,3));\n\tEXPECT_EQ(-1, laplacianMatrix(3,4));\n\tEXPECT_EQ(-1, laplacianMatrix(3,5));\n\tEXPECT_EQ(3, laplacianMatrix(4,4));\n\tEXPECT_EQ(0, laplacianMatrix(4,5));\n\tEXPECT_EQ(1, laplacianMatrix(5,5));\n\n\n\t// directed, weighted graph\n\tGraph dGraph(4, true, true);\n\tdGraph.addEdge(0,0,-1);\n\tdGraph.addEdge(0,1,3);\n\tdGraph.addEdge(1,3,42);\n\tdGraph.addEdge(3,1, -4);\n\n\tlaplacianMatrix = LaplacianMatrix(dGraph);\n\tEXPECT_EQ(3, laplacianMatrix(0,0));\n\tEXPECT_EQ(-3, laplacianMatrix(0,1));\n\tEXPECT_EQ(-42, laplacianMatrix(1,3));\n\tEXPECT_EQ(42, laplacianMatrix(1,1));\n\tEXPECT_EQ(-4, laplacianMatrix(3,3));\n\tEXPECT_EQ(4, laplacianMatrix(3,1));\n}\n\nTEST(LaplacianMatrixGTest, testLaplacianMatrixOfLesmisGraph) {\n\t// read lesmis graph\n\tMETISGraphReader graphReader;\n\tGraph graph = graphReader.read(\"input/lesmis.graph\");\n\n\t// create LaplacianMatrix\n\tLaplacianMatrix mat(graph);\n\n\tmat.forNonZeroElementsInRowOrder([&](const index row, const index column, const double value){\n\t\tif (row == column) {\n\t\t\tEXPECT_EQ(graph.weightedDegree(row) - graph.weight(row, row), value);\n\t\t} else {\n\t\t\tEXPECT_EQ(-graph.weight(row, column), value);\n\t\t}\n\t});\n\n}\n\n\n\n} /* namespace NetworKit */\n\n" }, { "alpha_fraction": 0.6331738233566284, "alphanum_fraction": 0.6459330320358276, "avg_line_length": 15.076923370361328, "blob_id": "458b1b7c10c1f45d973516763107a4f7a8e757da", "content_id": "56e7ca5454d169806f1dea4f5ff4b11178fd2764", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 627, "license_type": "permissive", "max_line_length": 110, "num_lines": 39, "path": "/networkit/cpp/centrality/Sfigality.h", "repo_name": "networkitproject/networkit-mirror", "src_encoding": "UTF-8", "text": "/*\n * Sfigality.h\n *\n * Created on: 20.01.2016\n * Author: Elisabetta Bergamini, Christian Staudt\n */\n\n#ifndef SFIGALITY_H_\n#define SFIGALITY_H_\n\n#include \"Centrality.h\"\n\nnamespace NetworKit {\n\n/**\n * @ingroup centrality\n * A\n */\nclass Sfigality: public NetworKit::Centrality {\npublic:\n\t/**\n\t * Constructs the Sfigality class for the given Graph @a G.\n\t *\n\t * @param G The graph.\n\n\t */\n\tSfigality(const Graph& G);\n\n\tvoid run() override;\n\n\t/**\n\t * @return the theoretical maximum degree centrality, which is $n$ (including the possibility of a self-loop)\n\t 
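* (Added usage sketch, hypothetical and not part of the original header: Sfigality s(G); s.run(); then read the results via s.scores() or s.ranking(), both inherited from the Centrality base class.)\n\t 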
*/\n\tdouble maximum() override;\n};\n\n} /* namespace NetworKit */\n\n#endif /* SFIGALITY_H_ */\n" }, { "alpha_fraction": 0.5798332095146179, "alphanum_fraction": 0.6427596807479858, "avg_line_length": 23.335792541503906, "blob_id": "e8f1dacd138a0757032def0c874ff001c56ae6ce", "content_id": "26f690ef1c71ba6a19940bbfec465caa0cd5e7d4", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 6595, "license_type": "permissive", "max_line_length": 79, "num_lines": 271, "path": "/networkit/cpp/distance/test/DistanceGTest.cpp", "repo_name": "networkitproject/networkit-mirror", "src_encoding": "UTF-8", "text": "/*\n * DistanceGTest.cpp\n *\n * Created on: Sep 04, 2015\n * Author: Maximilian Vogel\n */\n#ifndef NOGTEST\n\n#include \"DistanceGTest.h\"\n\n#include \"../Diameter.h\"\n#include \"../EffectiveDiameter.h\"\n#include \"../ApproxEffectiveDiameter.h\"\n#include \"../ApproxHopPlot.h\"\n#include \"../NeighborhoodFunction.h\"\n#include \"../ApproxNeighborhoodFunction.h\"\n\n#include \"../../generators/DorogovtsevMendesGenerator.h\"\n#include \"../../generators/ErdosRenyiGenerator.h\"\n#include \"../../io/METISGraphReader.h\"\n\nnamespace NetworKit {\n\nTEST_F(DistanceGTest, testVertexDiameterPedantically) {\n\tDorogovtsevMendesGenerator generator(1000);\n\tGraph G1 = generator.generate();\n\tGraph G = Graph(G1, true, false);\n\tDiameter diam(G, DiameterAlgo::estimatedPedantic);\n\tdiam.run();\n\tcount vd = diam.getDiameter().first;\n\tEXPECT_EQ(1000, vd);\n}\n\nTEST_F(DistanceGTest, testExactDiameter) {\n\n\tusing namespace std;\n\n\tvector<pair<string, count>> testInstances= {pair<string, count>(\"lesmis\", 14),\n\t\t\t\t\t\t\t\t\t\t\t pair<string, count>(\"jazz\", 6),\n\t\t\t\t\t\t\t\t\t\t\t pair<string, count>(\"celegans_metabolic\", 7)\n\t\t\t\t\t\t\t\t\t\t\t };\n\n\tfor (auto testInstance : testInstances) {\n\t\tMETISGraphReader reader;\n\t\tGraph G = reader.read(\"input/\" + testInstance.first + \".graph\");\n\t\tDiameter diam(G, DiameterAlgo::exact);\n\t\tdiam.run();\n\t\tcount diameter = diam.getDiameter().first;\n\t\tEXPECT_EQ(diameter, testInstance.second);\n\t}\n}\n\n\nTEST_F(DistanceGTest, testEstimatedDiameterRange) {\n\n\tusing namespace std;\n\n vector<pair<string, count>> testInstances= {\n\t\t\t\t\t\t\t\t\t\t\t pair<string, count>(\"celegans_metabolic\", 7),\n\t\t\t\t\t\t\t\t\t\t\t pair<string, count>(\"jazz\", 6)\n\t\t\t\t\t\t\t\t\t\t\t };\n\n\tfor (auto testInstance : testInstances) {\n\t\tMETISGraphReader reader;\n\t\tGraph G = reader.read(\"input/\" + testInstance.first + \".graph\");\n\t\tDiameter diam(G, DiameterAlgo::estimatedRange, 0.1);\n\t\tdiam.run();\n\t\tstd::pair<count, count> range = diam.getDiameter();\n\t\tEXPECT_GE(testInstance.second, range.first);\n\t\tEXPECT_LE(testInstance.second, range.second);\n\t}\n}\nTEST_F(DistanceGTest, testPedanticDiameterErdos) {\n\tcount n = 5000;\n\tErdosRenyiGenerator gen(n,0.001);\n\tGraph G1 = gen.generate();\n\tDiameter diam(G1, DiameterAlgo::estimatedPedantic);\n\tdiam.run();\n\tcount diameter = diam.getDiameter().first;\n\tASSERT_LE(diameter, n);\n}\n\n\nTEST_F(DistanceGTest, testEffectiveDiameterMinimal) {\n\t// Minimal example from the paper\n\tGraph G(5);\n\tG.addEdge(0,1);\n\tG.addEdge(1,2);\n\tG.addEdge(2,3);\n\tG.addEdge(3,4);\n\tG.addEdge(4,0);\n\tApproxEffectiveDiameter aef(G);\n\taef.run();\n\tdouble effective = aef.getEffectiveDiameter();\n\tDiameter diam(G, DiameterAlgo::exact);\n\tdiam.run();\n\tcount exact = diam.getDiameter().first;\n\tEXPECT_LE(effective, 
exact);\n}\n\nTEST_F(DistanceGTest, testEffectiveDiameter) {\n\nusing namespace std;\n\nvector<string> testInstances= {\"celegans_metabolic\", \"jazz\", \"lesmis\"};\n\nfor (auto testInstance : testInstances) {\n\tMETISGraphReader reader;\n\tGraph G = reader.read(\"input/\" + testInstance + \".graph\");\n\tApproxEffectiveDiameter aef(G);\n\taef.run();\n\tdouble effective = aef.getEffectiveDiameter();\n\tDiameter diam(G, DiameterAlgo::exact);\n\tdiam.run();\n\tcount exact = diam.getDiameter().first;\n\tEXPECT_LE(effective, exact);\n}\n}\n\nTEST_F(DistanceGTest, testEffectiveDiameterExact) {\n\n\tusing namespace std;\n\n\tvector<string> testInstances= {\"celegans_metabolic\", \"jazz\", \"lesmis\"};\n\n\tfor (auto testInstance : testInstances) {\n\t\tMETISGraphReader reader;\n\t\tGraph G = reader.read(\"input/\" + testInstance + \".graph\");\n\t\tEffectiveDiameter ed(G);\n\t\ted.run();\n\t\tdouble effective = ed.getEffectiveDiameter();\n\t\tDiameter diam(G, DiameterAlgo::exact);\n\t\tdiam.run();\n\t\tcount exact = diam.getDiameter().first;\n\t\tEXPECT_LE(effective, exact);\n\t}\n\n\tconst double tol = 1e-3;\n\n\t/* Graph: n=20, threshold: 20*0.9 = 18 nodes\n\t\t1--3--5--7---9\n\t\t| | | | |\n\t\t2--4--6--8--10\n\t\t\t| |\n\t\t\t11----12\n\t\t\t\t|\n\t\t\t13--14--15\n\t\t\t\t|\n\t\t\t18--16--17--19\n\t\t\t\t\t|\n\t\t\t\t\t20\n\tNumber of steps needed per node: (1-20)\n\t(7+6+6+5+6+5+5+4+6+5+4+4+5+4+5+5+6+6+7+7) / 20 = 5.4\n\t*/\n\t\tcount n1 = 20;\n\t\tGraph G1(n1);\n\n\t\tG1.addEdge(0,1);\n\t\tG1.addEdge(0,2);\n\t\tG1.addEdge(1,3);\n\t\tG1.addEdge(2,3);\n\t\tG1.addEdge(2,4);\n\t\tG1.addEdge(3,5);\n\t\tG1.addEdge(3,10);\n\t\tG1.addEdge(4,5);\n\t\tG1.addEdge(4,6);\n\t\tG1.addEdge(5,7);\n\t\tG1.addEdge(6,8);\n\t\tG1.addEdge(6,7);\n\t\tG1.addEdge(7,9);\n\t\tG1.addEdge(7,11);\n\t\tG1.addEdge(8,9);\n\t\tG1.addEdge(10,11);\n\t\tG1.addEdge(11,13);\n\t\tG1.addEdge(12,13);\n\t\tG1.addEdge(13,14);\n\t\tG1.addEdge(13,15);\n\t\tG1.addEdge(15,16);\n\t\tG1.addEdge(15,17);\n\t\tG1.addEdge(16,18);\n\t\tG1.addEdge(16,19);\n\n\t\tEffectiveDiameter ed(G1);\n\t\ted.run();\n\t\tdouble effective1 = ed.getEffectiveDiameter();\n\t\tEXPECT_NEAR(5.4, effective1, tol);\n\n\t\t/* Graph: n=21, threshold: 21*0.9 = 18.9 => 19 nodes\n\t\t\t\t\t13---------------3\n\t\t\t\t\t\t| |\n\t\t\t\t\t---14--12--| |\n\t\t\t\t\t| | | | |\n\t\t1--21--18--16--15 | | |\n\t\t\t\t\t| | | |\n\t\t\t20--17------10--8 |\n\t\t\t\t\t| | | |\n\t\t\t\t19 9--7--5--6--4--11\n\t\t\t\t\t\t\t\t\t\t|\n\t\t\t\t\t\t\t\t\t\t2\n\tNumber of steps needed per node: (1-21)\n\t(8+7+5+6+6+6+5+5+5+5+7+5+4+4+5+5+5+6+6+6+7) / 21 = 5.619047\n\t*/\n\t\tcount n2 = 21;\n\t\tGraph G2(n2);\n\n\t\tG2.addEdge(0,20);\n\t\tG2.addEdge(1,3);\n\t\tG2.addEdge(2,3);\n\t\tG2.addEdge(2,12);\n\t\tG2.addEdge(3,5);\n\t\tG2.addEdge(3,10);\n\t\tG2.addEdge(4,5);\n\t\tG2.addEdge(4,6);\n\t\tG2.addEdge(6,7);\n\t\tG2.addEdge(6,8);\n\t\tG2.addEdge(7,9);\n\t\tG2.addEdge(7,11);\n\t\tG2.addEdge(8,9);\n\t\tG2.addEdge(9,11);\n\t\tG2.addEdge(9,16);\n\t\tG2.addEdge(11,13);\n\t\tG2.addEdge(12,13);\n\t\tG2.addEdge(13,14);\n\t\tG2.addEdge(13,15);\n\t\tG2.addEdge(14,15);\n\t\tG2.addEdge(15,16);\n\t\tG2.addEdge(15,17);\n\t\tG2.addEdge(16,18);\n\t\tG2.addEdge(16,19);\n\t\tG2.addEdge(17,20);\n\n\t\tEffectiveDiameter ed2(G2);\n\t\ted2.run();\n\t\tdouble effective2 = ed2.getEffectiveDiameter();\n\t\tEXPECT_NEAR(5.619047, effective2, tol);\n}\n\nTEST_F(DistanceGTest, testHopPlot) {\n\tusing namespace std;\n\n\tvector<string> testInstances= {\"celegans_metabolic\", \"power\", \"lesmis\"};\n\n\tconst double tol = 
1e-2;\n\n\tfor (auto& testInstance : testInstances) {\n\t\tMETISGraphReader reader;\n\t\tGraph G = reader.read(\"input/\" + testInstance + \".graph\");\n\t\tApproxHopPlot hp(G);\n\t\thp.run();\n\t\tmap<count, double> hopPlot = hp.getHopPlot();\n\t\tfor (count i=1; i < hopPlot.size(); i++) {\n\t\t\tEXPECT_LE(hopPlot[i-1], hopPlot[i]+tol);\n\t\t}\n\t}\n}\n\nTEST_F(DistanceGTest, testApproxNeighborhoodFunction) {\n\tMETISGraphReader reader;\n\tGraph G = reader.read(\"input/lesmis.graph\").toUnweighted();\n\tNeighborhoodFunction nf(G);\n\tnf.run();\n\tauto exact = nf.getNeighborhoodFunction();\n\tApproxNeighborhoodFunction anf(G);\n\tanf.run();\n\tauto approximated = anf.getNeighborhoodFunction();\n\tEXPECT_EQ(exact.size(), approximated.size());\n}\n\n} /* namespace NetworKit */\n\n#endif /*NOGTEST */\n" }, { "alpha_fraction": 0.679755687713623, "alphanum_fraction": 0.6880453824996948, "avg_line_length": 28.766233444213867, "blob_id": "dad2626b754b9a4b1e0c618c7633afe367f1d1f9", "content_id": "b42c78187f4ca74cfee43bdbca0a685d0d9dd7dc", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 2292, "license_type": "permissive", "max_line_length": 167, "num_lines": 77, "path": "/networkit/cpp/centrality/DynApproxBetweenness.h", "repo_name": "networkitproject/networkit-mirror", "src_encoding": "UTF-8", "text": "/*\n * DynApproxBetweenness.h\n *\n * Created on: 31.07.2014\n * Author: ebergamini\n */\n\n#ifndef DYNAPPROXBETW_H_\n#define DYNAPPROXBETW_H_\n\n#include \"Centrality.h\"\n#include \"DynCentrality.h\"\n#include \"../dynamics/GraphEvent.h\"\n#include \"../graph/DynSSSP.h\"\n\n#include <math.h>\n#include <algorithm>\n#include <memory>\n#include <omp.h>\n\nnamespace NetworKit {\n\n/**\n * @ingroup centrality\n * Interface for dynamic approximated betweenness centrality algorithm.\n */\nclass DynApproxBetweenness: public Centrality, public DynCentrality {\n\npublic:\n /**\n * The algorithm approximates the betweenness of all vertices so that the scores are\n * within an additive error @a epsilon with probability at least (1- @a delta).\n * The values are normalized by default.\n *\n * @param\tG\t\t\tthe graph\n * @param\tstorePredecessors keep track of the lists of predecessors?\n * @param\tepsilon\t\tmaximum additive error\n * @param\tdelta\t\tprobability that the values are within the error guarantee\n * @param\tuniversalConstant\tthe universal constant to be used in\n * computing the sample size. It is 1 by default. 
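(Added note: the sample count r scales linearly with this constant, so larger values mean more sampled paths and a stronger guarantee at a higher running time.) 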
Some references suggest\n * using 0.5, but there is no guarantee in this case.\n */\n DynApproxBetweenness(const Graph& G, const double epsilon=0.01, const double delta=0.1, const bool storePredecessors = true, const double universalConstant = 1.0);\n\n /**\n * Runs the static approximated betweenness centrality algorithm on the initial graph.\n */\n void run() override;\n\n /**\n * Updates the betweenness centralities after a batch of edge insertions on the graph.\n * Notice: it works only with edge insertions and the graph has to be connected.\n *\n * @param batch The batch of edge insertions.\n */\n void update(const std::vector<GraphEvent>& batch) override;\n\n /**\n * Get number of path samples used for last calculation\n */\n count getNumberOfSamples();\n\nprivate:\n bool storePreds = true;\n double epsilon; //!< maximum error\n double delta;\n double universalConstant;\n count r;\n std::vector<std::unique_ptr<DynSSSP>> sssp;\n std::vector<node> u;\n std::vector<node> v;\n std::vector <std::vector<node>> sampledPaths;\n};\n\n} /* namespace NetworKit */\n\n#endif /* DYNAPPROXBETW_H_ */\n" }, { "alpha_fraction": 0.6533575057983398, "alphanum_fraction": 0.6624319553375244, "avg_line_length": 23.488889694213867, "blob_id": "b8a833eaeeb426eeeb705f1055cd9dc2532bb107", "content_id": "82bff8f4b53c52bb3f8ebacf4a704c16ad1c5db2", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1102, "license_type": "permissive", "max_line_length": 89, "num_lines": 45, "path": "/networkit/cpp/algebraic/LaplacianMatrix.cpp", "repo_name": "networkitproject/networkit-mirror", "src_encoding": "UTF-8", "text": "/*\n * LaplacianMatrix.cpp\n *\n * Created on: 20.03.2014\n * Author: Michael Wegner ([email protected])\n */\n\n#include \"LaplacianMatrix.h\"\n\nnamespace NetworKit {\n\nLaplacianMatrix::LaplacianMatrix(const Graph &graph) : Matrix(graph.upperNodeIdBound()) {\n\tstd::vector<std::pair<index, index>> positions;\n\tstd::vector<double> values;\n\n\tgraph.forNodes([&](const index i){\n\t\tdouble weightedDegree = graph.weightedDegree(i);\n\n\t\tdouble selfLoopWeight = 0.0;\n\t\tgraph.forNeighborsOf(i, [&](const index j, double weight) { // - adjacency matrix\n\t\t\tif (j == i) {\n\t\t\t\tselfLoopWeight = weight;\n\t\t\t} else {\n\t\t\t\tpositions.push_back(std::make_pair(i,j));\n\t\t\t\tvalues.push_back(-weight);\n\t\t\t}\n\t\t});\n\n\t\tpositions.push_back(std::make_pair(i,i));\n\t\tvalues.push_back(weightedDegree - selfLoopWeight); // degree matrix\n\t});\n\n\tgraph.forNodes([&](const index i){\n\t\tdouble weightedDegree = graph.weightedDegree(i);\n\n\t\tgraph.forNeighborsOf(i, [&](const index j, double weight) { // - adjacency matrix\n\t\t\tsetValue(i, j, -weight);\n\t\t});\n\n\t\tsetValue(i, i, weightedDegree - graph.weight(i, i)); // degree matrix\n\t});\n}\n\n\n} /* namespace NetworKit */\n" }, { "alpha_fraction": 0.4908471703529358, "alphanum_fraction": 0.493685245513916, "avg_line_length": 36.68449020385742, "blob_id": "15a479947860ce7dd5dca007a975f37613b68310", "content_id": "7be1187e61d47f067848ab5060238e5bddb0ff4c", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 7047, "license_type": "permissive", "max_line_length": 185, "num_lines": 187, "path": "/networkit/cpp/centrality/DynApproxBetweenness.cpp", "repo_name": "networkitproject/networkit-mirror", "src_encoding": "UTF-8", "text": "/*\n * DynApproxBetweenness.cpp\n *\n * Created on: 31.07.2014\n * Author: ebergamini\n 
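*\n * Note (added for clarity): the number of path samples r computed in run() below follows\n * r = ceil((universalConstant / epsilon^2) * (floor(log2(VD - 2)) + 1 - log(delta))),\n * where VD is the estimated vertex diameter of G.\n 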
*/\n\n#include \"DynApproxBetweenness.h\"\n#include \"../auxiliary/Random.h\"\n#include \"../distance/Diameter.h\"\n#include \"../graph/Sampling.h\"\n#include \"../graph/DynDijkstra.h\"\n#include \"../graph/DynBFS.h\"\n#include \"../auxiliary/Log.h\"\n#include \"../auxiliary/NumericTools.h\"\n\n\nnamespace NetworKit {\n\nDynApproxBetweenness::DynApproxBetweenness(const Graph& G, const double epsilon, const double delta, const bool storePredecessors, const double universalConstant) : Centrality(G, true),\nstorePreds(storePredecessors), epsilon(epsilon), delta(delta), universalConstant(universalConstant) {\n INFO(\"Constructing DynApproxBetweenness. storePredecessors = \", storePredecessors);\n}\n\n\ncount DynApproxBetweenness::getNumberOfSamples() {\n return r;\n}\n\n\nvoid DynApproxBetweenness::run() {\n INFO(\"Inside DynApproxBetweenness. storePreds = \", storePreds);\n if (G.isDirected()) {\n throw std::runtime_error(\"Invalid argument: G must be undirected.\");\n }\n scoreData.clear();\n scoreData.resize(G.upperNodeIdBound());\n u.clear();\n v.clear();\n sampledPaths.clear();\n\n Diameter diam(G, DiameterAlgo::estimatedPedantic);\n diam.run();\n edgeweight vd = diam.getDiameter().first;\n\n INFO(\"estimated diameter: \", vd);\n r = ceil((universalConstant / (epsilon * epsilon)) * (floor(log2(vd - 2)) + 1 - log(delta)));\n INFO(\"taking \", r, \" path samples\");\n sssp.clear();\n sssp.resize(r);\n u.resize(r);\n v.resize(r);\n sampledPaths.resize(r);\n\n for (count i = 0; i < r; i++) {\n DEBUG(\"sample \", i);\n // sample random node pair\n u[i] = Sampling::randomNode(G);\n do {\n v[i] = Sampling::randomNode(G);\n } while (v[i] == u[i]);\n if (G.isWeighted()) {\n INFO(\"Calling DynDijkstra inside run DynApproxBet\");\n sssp[i].reset(new DynDijkstra(G, u[i], storePreds));\n } else {\n INFO(\"Calling DynBFS inside run DynApproxBet\");\n sssp[i].reset(new DynBFS(G, u[i], storePreds));\n }\n DEBUG(\"running shortest path algorithm for node \", u[i]);\n\n INFO(\"Calling setTargetNodeon sssp instance inside run DynApproxBet\");\n sssp[i]->setTargetNode(v[i]);\n INFO(\"Calling run on sssp instance inside run DynApproxBet\");\n sssp[i]->run();\n INFO(\"Ran sssp\");\n if (sssp[i]->distances[v[i]] > 0) { // at least one path between {u, v} exists\n DEBUG(\"updating estimate for path \", u[i], \" <-> \", v[i]);\n INFO(\"Entered if statement.\");\n // random path sampling and estimation update\n sampledPaths[i].clear();\n node t = v[i];\n while (t != u[i]) {\n INFO(\"Entered while statement\");\n // sample z in P_u(t) with probability sigma_uz / sigma_us\n std::vector<std::pair<node, double> > choices;\n if (storePreds) {\n for (node z : sssp[i]->previous[t]) {\n // workaround for integer overflow in large graphs\n bigfloat tmp = sssp[i]->numberOfPaths(z) / sssp[i]->numberOfPaths(t);\n double weight;\n tmp.ToDouble(weight);\n\n choices.emplace_back(z, weight); \t// sigma_uz / sigma_us\n }\n }\n else {\n INFO(\"Storepreds is false\");\n G.forInEdgesOf(t, [&](node t, node z, edgeweight w){\n if (Aux::NumericTools::logically_equal(sssp[i]->distances[t], sssp[i]->distances[z] + w)) {\n // workaround for integer overflow in large graphs\n INFO(\"Calling number of paths\");\n bigfloat tmp = sssp[i]->numberOfPaths(z) / sssp[i]->numberOfPaths(t);\n INFO(\"Called number of paths\");\n double weight;\n tmp.ToDouble(weight);\n\n choices.emplace_back(z, weight);\n }\n\n });\n }\n INFO(\"Node considered: \", t);\n INFO(\"Source considered: \", u[i]);\n assert (choices.size() > 0);\n node z = 
Aux::Random::weightedChoice(choices);\n assert (z <= G.upperNodeIdBound());\n if (z != u[i]) {\n scoreData[z] += 1 / (double) r;\n sampledPaths[i].push_back(z);\n }\n t = z;\n }\n }\n }\n\n hasRun = true;\n\n}\n\nvoid DynApproxBetweenness::update(const std::vector<GraphEvent>& batch) {\n INFO (\"Updating\");\n for (node i = 0; i < r; i++) {\n sssp[i]->update(batch);\n if (sssp[i]->modified()) {\n // subtract contributions to nodes in the old sampled path\n for (node z: sampledPaths[i]) {\n scoreData[z] -= 1 / (double) r;\n }\n // sample a new shortest path\n sampledPaths[i].clear();\n node t = v[i];\n while (t != u[i]) {\n // sample z in P_u(t) with probability sigma_uz / sigma_us\n std::vector<std::pair<node, double> > choices;\n if (storePreds) {\n for (node z : sssp[i]->previous[t]) {\n // workaround for integer overflow in large graphs\n bigfloat tmp = sssp[i]->numberOfPaths(z) / sssp[i]->numberOfPaths(t);\n double weight;\n tmp.ToDouble(weight);\n\n choices.emplace_back(z, weight);\n }\n }\n else {\n G.forInEdgesOf(t, [&](node t, node z, edgeweight w){\n if (Aux::NumericTools::logically_equal(sssp[i]->distances[t], sssp[i]->distances[z] + w)) {\n // workaround for integer overflow in large graphs\n bigfloat tmp = sssp[i]->numberOfPaths(z) / sssp[i]->numberOfPaths(t);\n double weight;\n tmp.ToDouble(weight);\n\n choices.emplace_back(z, weight);\n }\n });\n }\n assert (choices.size() > 0); // this should fail only if the graph is not connected\n if (choices.size() == 0) {\n INFO (\"node: \", t);\n INFO (\"source: \", u[i]);\n INFO (\"distance: \", sssp[i]->distances[t]);\n }\n node z = Aux::Random::weightedChoice(choices);\n assert (z <= G.upperNodeIdBound());\n if (z != u[i]) {\n scoreData[z] += 1 / (double) r;\n sampledPaths[i].push_back(z);\n }\n t = z;\n }\n\n }\n\n }\n}\n\n} /* namespace NetworKit */\n" }, { "alpha_fraction": 0.6533575057983398, "alphanum_fraction": 0.6624319553375244, "avg_line_length": 23.488889694213867, "blob_id": "b8a833eaeeb426eeeb705f1055cd9dc2532bb107", "content_id": "82bff8f4b53c52bb3f8ebacf4a704c16ad1c5db2", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1102, "license_type": "permissive", "max_line_length": 89, "num_lines": 45, "path": "/networkit/cpp/algebraic/LaplacianMatrix.cpp", "repo_name": "networkitproject/networkit-mirror", "src_encoding": "UTF-8", "text": "/*\n * LaplacianMatrix.cpp\n *\n * Created on: 20.03.2014\n * Author: Michael Wegner ([email protected])\n */\n\n#include \"LaplacianMatrix.h\"\n\nnamespace NetworKit {\n\nLaplacianMatrix::LaplacianMatrix(const Graph &graph) : Matrix(graph.upperNodeIdBound()) {\n\t// L = D - A: off-diagonal entries are the negated edge weights; the diagonal entry\n\t// is the weighted degree minus any self-loop weight and overwrites the value the\n\t// neighbor loop may have set for a self-loop\n\tgraph.forNodes([&](const index i){\n\t\tdouble weightedDegree = graph.weightedDegree(i);\n\n\t\tgraph.forNeighborsOf(i, [&](const index j, double weight) { // - adjacency matrix\n\t\t\tsetValue(i, j, -weight);\n\t\t});\n\n\t\tsetValue(i, i, weightedDegree - graph.weight(i, i)); // degree matrix\n\t});\n}\n\n\n} /* namespace NetworKit */\n" }, { "alpha_fraction": 0.6065722107887268, "alphanum_fraction": 0.6150135397911072, "avg_line_length": 25.705427169799805, "blob_id": "8a138b9f895f2eeb702b3137d94efc16eeef600b", "content_id": "3b205bcb7bd0024abc64d221cce1144856d791b0", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 3317, "license_type": "permissive", "max_line_length": 121, "num_lines": 129, "path": "/networkit/cpp/io/SNAPGraphReader.cpp", "repo_name": "networkitproject/networkit-mirror", "src_encoding": "UTF-8", "text": "/*\n * SNAPGraphReader.cpp\n *\n * Created on: 19.05.2014\n * Author: Maximilian Vogel\n */\n\n#include \"SNAPGraphReader.h\"\n#include \"../auxiliary/StringTools.h\"\n#include \"../auxiliary/Log.h\"\n\n#include <sstream>\n#include <fstream>\n\nnamespace NetworKit {\n\nGraph SNAPGraphReader::read(const std::string& path) {\n\tstd::ifstream file;\n\tstd::string line; // the current line\n\n\t// read file once to get to the last line and figure out the number of nodes\n\t// unfortunately there is an empty line at the end of the file, so we need to get the line before that\n\t\n\tfile.open(path);\n\tif (! 
file.good()) {\n\t\tthrow std::runtime_error(\"unable to read from file\");\n\t}\n\t\n\tstd::string previousLine;\n\tnode maxNode = 0;\n\tnode consecutiveID = 0;\n\t//std::unordered_map<node,node> mapNodeIds;\n\t\n\tstd::string commentPrefix = \"#\";\n\t\n\t// count firstNode = 0;\n\tchar separator = '\\t';\n\n\t//DEBUG(\"separator: \" , separator);\n\t//DEBUG(\"first node: \" , firstNode);\n\n\t// first find out the maximum node id\n\tDEBUG(\"first pass: create node ID mapping\");\n\tcount i = 0;\n\twhile (file.good()) {\n\t\t++i;\n\t\tstd::getline(file, line);\n\t\t// TRACE(\"read line: \" , line);\n\t\tif (line.compare(0, commentPrefix.length(), commentPrefix) == 0) {\n\t\t\t// TRACE(\"ignoring comment: \" , line);\n\t } else if (line.length() == 0) {\n \t\t// TRACE(\"ignoring empty line\");\n\t\t} else {\n\t\t\tstd::vector<std::string> split = Aux::StringTools::split(line, separator);\n\t\n\t\t\tif (split.size() == 2) {\n \t\t\tTRACE(\"split into : \" , split[0] , \" and \" , split[1]);\n\t\t\t\tnode u = std::stoul(split[0]);\n\t\t\t\tif(mapNodeIds.insert(std::make_pair(u,consecutiveID)).second) consecutiveID++;\n\t\t\t\tif (u > maxNode) {\n\t\t\t\t\tmaxNode = u;\n\t\t\t\t}\n\t\t\t\tnode v = std::stoul(split[1]);\n\t\t\t\tif(mapNodeIds.insert(std::make_pair(v,consecutiveID)).second) consecutiveID++;\n\n\t\t\t\tif (v > maxNode) {\n\t\t\t\t\tmaxNode = v;\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tstd::stringstream message;\n\t\t\t\tmessage << \"malformed line \";\n\t\t\t\tmessage << i << \": \";\n\t\t\t\tmessage << line;\n\t\t\t\tthrow std::runtime_error(message.str());\n\t\t\t}\n\t\t}\n\t}\n\n\tfile.close();\n\n\t//maxNode = maxNode - firstNode + 1;\n\t//DEBUG(\"max. node id found: \" , maxNode);\n\n\t//Graph G(maxNode);\n\tDEBUG(\"found \",mapNodeIds.size(),\" unique node ids\");\n\tGraph G(mapNodeIds.size());\n\n\tDEBUG(\"second pass: add edges\");\n\tfile.open(path);\n\n // split the line into start and end node. 
since the edges are sorted, the start node has the highest id of all nodes\n\ti = 0; // count lines\n\twhile(std::getline(file,line)){\n \t++i;\n\t\tif (line.compare(0, commentPrefix.length(), commentPrefix) == 0) {\n\t\t\t// TRACE(\"ignoring comment: \" , line);\n\t\t} else {\n\t\t\t// TRACE(\"edge line: \" , line);\n\t\t\tstd::vector<std::string> split = Aux::StringTools::split(line, separator);\n\t\t\tstd::string splitZero = split[0];\n\t\t\tif (split.size() == 2) {\n\t\t\t\tnode u = mapNodeIds[std::stoi(split[0])];\n\t\t\t\tnode v = mapNodeIds[std::stoi(split[1])];\n\t\t\t\tif (!G.hasEdge(u,v) && !G.hasEdge(v,u)) {\n\t\t\t\t\tG.addEdge(u, v);\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tstd::stringstream message;\n\t\t\t\tmessage << \"malformed line \";\n\t\t\t\tmessage << i << \": \";\n\t\t\t\tmessage << line;\n\t\t\t\tthrow std::runtime_error(message.str());\n\t\t\t}\n\t\t}\n\t}\n\tDEBUG(\"read \",i,\" lines and added \",G.numberOfEdges(),\" edges\");\n\tfile.close();\n\n\tG.shrinkToFit();\n\treturn G;\n}\n\nstd::unordered_map<node,node> SNAPGraphReader::getNodeIdMap() {\n\treturn mapNodeIds;\n}\n\n\n\n}\n\n" }, { "alpha_fraction": 0.7007672786712646, "alphanum_fraction": 0.7161125540733337, "avg_line_length": 15.291666984558105, "blob_id": "0a939c3ed82a30701b291556581a21ee1f46616d", "content_id": "ab92cfeab9294c0b48aa2595ddebecb1dda073f2", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 391, "license_type": "permissive", "max_line_length": 54, "num_lines": 24, "path": "/networkit/cpp/components/test/ConnectedComponentsGTest.h", "repo_name": "networkitproject/networkit-mirror", "src_encoding": "UTF-8", "text": "/*\n * ConnectedComponentsGTest.h\n *\n * Created on: Sep 16, 2013\n * Author: birderon\n */\n\n#ifndef NOGTEST\n\n#ifndef CONNECTEDCOMPONENTSGTEST_H_\n#define CONNECTEDCOMPONENTSGTEST_H_\n\n#include <gtest/gtest.h>\n\nnamespace NetworKit {\n\nclass ConnectedComponentsGTest: public testing::Test {\npublic:\n};\n\n} /* namespace NetworKit */\n#endif /* CONNECTEDCOMPONENTSGTEST_H_ */\n\n#endif /*NOGTEST */\n" }, { "alpha_fraction": 0.6790450811386108, "alphanum_fraction": 0.6959549188613892, "avg_line_length": 21.67669105529785, "blob_id": "c2e6d3f84e70c99741fca18ad6eaf55e2b398cf4", "content_id": "dcf0c656cdb32454871b96e1ff61f37db518a2f6", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 3016, "license_type": "permissive", "max_line_length": 74, "num_lines": 133, "path": "/networkit/cpp/matching/test/MatcherGTest.cpp", "repo_name": "networkitproject/networkit-mirror", "src_encoding": "UTF-8", "text": "/*\n * MatcherGTest.cpp\n *\n * Created on: Feb 7, 2013\n * Author: Henning\n */\n\n#ifndef NOGTEST\n\n#include \"MatcherGTest.h\"\n#include \"../Matcher.h\"\n#include \"../Matching.h\"\n#include \"../PathGrowingMatcher.h\"\n#include \"../LocalMaxMatcher.h\"\n#include \"../../graph/Graph.h\"\n#include \"../../io/DibapGraphReader.h\"\n#include \"../../io/METISGraphReader.h\"\n#include \"../../auxiliary/Random.h\"\n\n\nnamespace NetworKit {\n\n\nTEST_F(MatcherGTest, testLocalMaxMatching) {\n\tcount n = 50;\n\tGraph G(n);\n\tG.forNodePairs([&](node u, node v){\n\t\tG.addEdge(u,v);\n\t});\n\n\tLocalMaxMatcher localMaxMatcher(G);\n\n\tTRACE(\"Start localMax matching\");\n\tlocalMaxMatcher.run();\n\tMatching M = localMaxMatcher.getMatching();\n\tTRACE(\"Finished localMax matching\");\n\n\tcount numExpEdges = n / 2;\n\tbool isProper = 
M.isProper(G);\n\tEXPECT_TRUE(isProper);\n\tEXPECT_EQ(M.size(G), numExpEdges);\n\n#if !defined _WIN32 && !defined _WIN64 && !defined WIN32 && !defined WIN64\n\tDibapGraphReader reader;\n\tGraph airfoil1 = reader.read(\"input/airfoil1.gi\");\n\tLocalMaxMatcher lmm(airfoil1);\n\tlmm.run();\n\tM = lmm.getMatching();\n\tisProper = M.isProper(airfoil1);\n\tEXPECT_TRUE(isProper);\n\tDEBUG(\"LocalMax on airfoil1 produces matching of size: \" , M.size(G));\n#endif\n}\n\nTEST_F(MatcherGTest, testLocalMaxMatchingDirectedWarning) {\n\tGraph G(2, false, true);\n\tG.addEdge(0,1);\n\tEXPECT_THROW(LocalMaxMatcher localMaxMatcher(G), std::runtime_error);\n}\n\n\nTEST_F(MatcherGTest, testPgaMatchingOnWeightedGraph) {\n\tcount n = 50;\n\tGraph G(n);\n\tG.forNodePairs([&](node u, node v){\n\t\tG.addEdge(u,v, Aux::Random::real());\n\t});\n\tPathGrowingMatcher pgaMatcher(G);\n\tpgaMatcher.run();\n}\n\nTEST_F(MatcherGTest, testPgaMatchingWithSelfLoops) {\n\tcount n = 50;\n\tGraph G(n);\n\tG.forNodePairs([&](node u, node v){\n\t\tG.addEdge(u,v, Aux::Random::real());\n\t});\n\tG.forNodes([&](node u){\n\t\tG.addEdge(u,u);\n\t});\n\tEXPECT_THROW(PathGrowingMatcher pgaMatcher(G),std::invalid_argument);\n\t//pgaMatcher.run();\n}\n\n\nTEST_F(MatcherGTest, testPgaMatching) {\n\tcount n = 50;\n\tGraph G(n);\n\tG.forNodePairs([&](node u, node v){\n\t\tG.addEdge(u,v);\n\t});\n\tPathGrowingMatcher pgaMatcher(G);\n\n\tDEBUG(\"Start PGA matching on 50-clique\");\n\n\tpgaMatcher.run();\n\tMatching M = pgaMatcher.getMatching();\n\n\tcount numExpEdges = n / 2;\n\tbool isProper = M.isProper(G);\n\tEXPECT_TRUE(isProper);\n\tEXPECT_EQ(M.size(G), numExpEdges);\n\tDEBUG(\"Finished PGA matching on 50-clique\");\n\n\n#if !defined _WIN32 && !defined _WIN64 && !defined WIN32 && !defined WIN64\n\tDibapGraphReader reader;\n\tGraph airfoil1 = reader.read(\"input/airfoil1.gi\");\n\tPathGrowingMatcher pga2(airfoil1);\n\tpga2.run();\n\tM = pga2.getMatching();\n\tisProper = M.isProper(airfoil1);\n\tEXPECT_TRUE(isProper);\n\tDEBUG(\"PGA on airfoil1 produces matching of size: \" , M.size(G));\n#endif\n}\n\nTEST_F(MatcherGTest, tryValidMatching) {\n\tMETISGraphReader reader;\n\tGraph G = reader.read(\"coAuthorsDBLP.graph\");\n\n\tLocalMaxMatcher pmatcher(G);\n\tpmatcher.run();\n\tMatching M = pmatcher.getMatching();\n\n\tbool isProper = M.isProper(G);\n\tEXPECT_TRUE(isProper);\n}\n\n\n} // namespace EnsembleClustering\n\n#endif\n" }, { "alpha_fraction": 0.7048386931419373, "alphanum_fraction": 0.7177419066429138, "avg_line_length": 15.756756782531738, "blob_id": "2994d1f27bf766c6f30e526d284883f56995f8aa", "content_id": "ab515b9f3caba4c376ed3ff9d764ea3a2bc7af33", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 620, "license_type": "permissive", "max_line_length": 62, "num_lines": 37, "path": "/networkit/cpp/generators/test/GeneratorsGTest.h", "repo_name": "networkitproject/networkit-mirror", "src_encoding": "UTF-8", "text": "/*\n * GeneratorsGTest.h\n *\n * Created on: 09.04.2013\n * Author: cls\n */\n\n#ifndef NOGTEST\n\n#ifndef GENERATORSGTEST_H_\n#define GENERATORSGTEST_H_\n\n#include <gtest/gtest.h>\n\n#include \"../HyperbolicGenerator.h\"\n#include \"../DynamicHyperbolicGenerator.h\"\n\nnamespace NetworKit {\n\nclass GeneratorsGTest: public testing::Test {\npublic:\n\tGeneratorsGTest();\n\n\tvector<double> getAngles(DynamicHyperbolicGenerator dynGen) {\n\t\treturn dynGen.angles;\n\t}\n\n\tvector<double> getRadii(DynamicHyperbolicGenerator dynGen) {\n\t\treturn 
dynGen.radii;\n\t}\n\n};\n\n} /* namespace NetworKit */\n#endif /* GENERATORSGTEST_H_ */\n\n#endif /*NOGTEST */\n" }, { "alpha_fraction": 0.6293888092041016, "alphanum_fraction": 0.6749024987220764, "avg_line_length": 16.477272033691406, "blob_id": "feb2dd570d22522f4e8dda5e9293cbe7d7dc425f", "content_id": "16c93fc7a1eac3c40f539129ef5e44966ff2495c", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 769, "license_type": "permissive", "max_line_length": 51, "num_lines": 44, "path": "/networkit/cpp/algebraic/test/IncidenceMatrixGTest.h", "repo_name": "networkitproject/networkit-mirror", "src_encoding": "UTF-8", "text": "/*\n * IncidenceMatrixGTest.h\n *\n * Created on: 01.04.2014\n * Author: Michael\n */\n\n#ifndef NOGTEST\n\n#ifndef INCIDENCEMATRIXGTEST_H_\n#define INCIDENCEMATRIXGTEST_H_\n\n#include \"gtest/gtest.h\"\n#include \"../IncidenceMatrix.h\"\n#include \"../Vector.h\"\n#include \"../../graph/Graph.h\"\n\nnamespace NetworKit {\n\nclass IncidenceMatrixGTest : public testing::Test {\nprotected:\n\tvirtual void SetUp() {\n\t\tgraph = NetworKit::Graph(5, true);\n\t\tgraph.addEdge(0,1, 4.0);\n\t\tgraph.addEdge(0,2, 9.0);\n\t\tgraph.addEdge(0,3, 16.0);\n\t\tgraph.addEdge(2,3, 1.0);\n\t\tgraph.addEdge(4,1, 25.0);\n\t\tgraph.addEdge(4,4, 1.0);\n\t}\n\n\tNetworKit::Graph graph;\n\npublic:\n\tIncidenceMatrixGTest();\n\tvirtual ~IncidenceMatrixGTest();\n};\n\n\n} /* namespace NetworKit */\n\n#endif /* INCIDENCEMATRIXGTEST_H_ */\n\n#endif\n" }, { "alpha_fraction": 0.6702127456665039, "alphanum_fraction": 0.6789168119430542, "avg_line_length": 21, "blob_id": "cf5d5ecbf109faa2a0a3b231b848d185610d056a", "content_id": "dc48be66ed2f046127513417da2f69d81718bdc7", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1034, "license_type": "permissive", "max_line_length": 78, "num_lines": 47, "path": "/networkit/cpp/graph/test/GraphGTest.h", "repo_name": "networkitproject/networkit-mirror", "src_encoding": "UTF-8", "text": "/*\n * GraphGTest.h\n *\n * Created on: 01.06.2014\n * Author: Klara Reichard ([email protected]), Marvin Ritter ([email protected])\n */\n\n#ifndef NOGTEST\n\n#ifndef GRAPHGTEST_H_\n#define GRAPHGTEST_H_\n\n#include <tuple>\n#include <gtest/gtest.h>\n\n#include \"../Graph.h\"\n\nnamespace NetworKit {\n\nclass GraphGTest: public testing::TestWithParam< std::tuple<bool, bool> > {\npublic:\n\tvirtual void SetUp();\n\nprotected:\n\tGraph Ghouse;\n\tstd::vector< std::pair<node, node> > houseEdgesOut;\n\tstd::vector< std::vector<edgeweight> > Ahouse;\n\tcount n_house;\n\tcount m_house;\n\n\tbool isGraph() const { return !isWeighted() && !isDirected(); }\n\tbool isWeightedGraph() const { return isWeighted() && !isDirected(); }\n\tbool isDirectedGraph() const { return !isWeighted() && isDirected(); }\n\tbool isWeightedDirectedGraph() const { return isWeighted() && isDirected(); }\n\n\n\tbool isWeighted() const;\n\tbool isDirected() const;\n\tGraph createGraph(count n = 0) const;\n\tcount countSelfLoopsManually(const Graph &G);\n};\n\n} /* namespace NetworKit */\n\n#endif /* GRAPHGTEST_H_ */\n\n#endif /* NOGTEST */\n" }, { "alpha_fraction": 0.6525423526763916, "alphanum_fraction": 0.6864407062530518, "avg_line_length": 18.66666603088379, "blob_id": "d47802c246ef69c155893aa615c3e0f4748c6f28", "content_id": "a0674b5f71befc93ce75514e476c111e0c61a9d2", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 472, "license_type": 
"permissive", "max_line_length": 178, "num_lines": 24, "path": "/networkit/cpp/auxiliary/test/AuxGTest.h", "repo_name": "networkitproject/networkit-mirror", "src_encoding": "UTF-8", "text": "/*\n * AuxGTest.h\n *\n * Created on: 10.01.2013\n * Author: Christian Staudt ([email protected])\n */\n\n#ifndef NOGTEST\n\n#ifndef AUXGTEST_H_\n#define AUXGTEST_H_\n\n// this define is an obscure fix for std::this_thread::sleep_for to work - the issue is described here: http://stackoverflow.com/questions/4438084/stdthis-threadsleep-for-and-gcc\n#define _GLIBCXX_USE_NANOSLEEP 1\n\n#include <gtest/gtest.h>\n\nclass AuxGTest: public testing::Test {\n\n};\n\n#endif /* AUXGTEST_H_ */\n\n#endif /*NOGTEST */\n" }, { "alpha_fraction": 0.5728476643562317, "alphanum_fraction": 0.5838851928710938, "avg_line_length": 20.452381134033203, "blob_id": "04c819d5aa5c291b2f51b8c7d35d2edeeba8f103", "content_id": "2d5c4de15515991a338adba47a132c1750e0fcf0", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 906, "license_type": "permissive", "max_line_length": 112, "num_lines": 42, "path": "/networkit/cpp/algebraic/IncidenceMatrix.cpp", "repo_name": "networkitproject/networkit-mirror", "src_encoding": "UTF-8", "text": "/*\n * IncidenceMatrix.cpp\n *\n * Created on: 21.03.2014\n * Author: Michael Wegner ([email protected])\n */\n\n#include \"IncidenceMatrix.h\"\n\nnamespace NetworKit {\n\nIncidenceMatrix::IncidenceMatrix(const Graph &graph) : Matrix(graph.upperNodeIdBound(), graph.numberOfEdges()) {\n\tif (graph.isDirected()) {\n\t\tindex edgeId = 0;\n\t\tgraph.forEdges([&](node u, node v, edgeweight weight) {\n\t\t\tif (u != v) {\n\t\t\t\tedgeweight w = sqrt(weight);\n\t\t\t\tsetValue(u, edgeId, w);\n\t\t\t\tsetValue(v, edgeId, -w);\n\t\t\t}\n\t\t\tedgeId++;\n\t\t});\n\t} else {\n\t\tindex edgeId = 0;\n\t\tgraph.forEdges([&](node u, node v, edgeweight weight){\n\t\t\tif (u != v) {\n\t\t\t\tedgeweight w = sqrt(weight);\n\t\t\t\tif (u < v) { // orientation: small node number -> great node number\n\t\t\t\t\tsetValue(u, edgeId, w);\n\t\t\t\t\tsetValue(v, edgeId, -w);\n\t\t\t\t} else {\n\t\t\t\t\tsetValue(u, edgeId, -w);\n\t\t\t\t\tsetValue(v, edgeId, w);\n\t\t\t\t}\n\t\t\t}\n\t\t\tedgeId++;\n\t\t});\n\t}\n}\n\n\n} /* namespace NetworKit */\n\n\n\n\n\n" }, { "alpha_fraction": 0.5844694972038269, "alphanum_fraction": 0.6361885070800781, "avg_line_length": 21.568561553955078, "blob_id": "a495aca83a19f9f2701d35699d779698488914c9", "content_id": "761fd25e7e639a567a08488d7a699864be604727", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 6748, "license_type": "permissive", "max_line_length": 85, "num_lines": 299, "path": "/networkit/cpp/structures/test/CoverGTest.cpp", "repo_name": "networkitproject/networkit-mirror", "src_encoding": "UTF-8", "text": "/*\n * CoverGTest.cpp\n *\n * Created on: 12.12.2013\n * Author: Maximilian Vogel ([email protected])\n */\n\n#include \"CoverGTest.h\"\n\n#include \"../Cover.h\"\n\n#include <iostream>\n\n#ifndef NOGTEST\n\nnamespace NetworKit {\n\n// stub\n/*TEST_F(CoverGTest, *) {\n}*/\n\n// stub to print every subset id of every entry\n/*\tc.forEntries([](index e, std::set<index> s) {\n\t\tauto current = s;\n\t\tif (!current.empty()) {\n\t\t\tstd::cout<<\"element \"<<e<<\" is in subsets \";\n\t\t\tfor (auto it = current.begin(); it != current.end(); it++) {\n\t\t\t\tstd::cout<<*it<<\" \";\n\t\t\t}\n\t\t\tstd::cout << std::endl;\n\t\t}\n\t});*/\n\nTEST_F(CoverGTest, 
testConstructor) {\n\tCover c(10);\n\tEXPECT_EQ(0u, c.lowerBound());\n\tEXPECT_EQ(1u, c.upperBound());\n}\n\nTEST_F(CoverGTest, testAllToSingletonsAndUpperBound) {\n\tCover c(10);\n\tEXPECT_EQ(1u, c.upperBound());\n\tc.allToSingletons();\n\tEXPECT_EQ(0u, c.lowerBound());\n\tEXPECT_EQ(11u, c.upperBound());\n}\n\nTEST_F(CoverGTest, testContains) {\n\tCover c(10);\n\tc.toSingleton(0);\n\tEXPECT_TRUE(c.contains(0));\n\tEXPECT_FALSE(c.contains(1));\n}\n\nTEST_F(CoverGTest, testUpperBoundAfterMerges) {\n\tcount n = 10;\n\tCover c(n);\n\tfor (index i = 0; i < n; i+=2) {\n\t\tindex sid = c.toSingleton(i);\n\t\tc.toSingleton(i+1);\n\t\tc.addToSubset(sid,i+1);\n\t}\n\tfor (index i = 0; i < n; i++) {\n\t\tc.addToSubset(i+1,0);\n\t}\n\tc.mergeSubsets(1,3);\n\tc.mergeSubsets(5,11);\n\tEXPECT_EQ(13u, c.upperBound());\n}\n\n\nTEST_F(CoverGTest, testToSingleton) {\n\tcount n = 10;\n\tCover c(n);\n\tc.allToSingletons();\n\tstd::set<index> controlSet2;\n\tcontrolSet2.insert(1);\n\tDEBUG(\"c[0] \", c[0], \" and controlSet2 \", controlSet2);\n\tEXPECT_TRUE(c[0] == controlSet2);\n\tc.addToSubset(5,0);\n\tc.addToSubset(2,0);\n\tc.addToSubset(3,0);\n\tc.addToSubset(4,0);\n\tc.addToSubset(0,1);\n\tc.toSingleton(0);\n\tstd::set<index> controlSet;\n\tcontrolSet.insert(11);\n\tDEBUG(\"c[0] \", c[0], \" and controlSet \", controlSet);\n\tEXPECT_TRUE(c[0] == controlSet);\n}\n\nTEST_F(CoverGTest, testAddToSubset) {\n\tcount n = 10;\n\tCover c(n);\n\tc.addToSubset(0,0);\n\tc.addToSubset(0,1);\n\tstd::set<index> controlSet = {0};\n\tEXPECT_TRUE(c.inSameSubset(0,1));\n\tEXPECT_TRUE(c[0] == controlSet);\n\tEXPECT_TRUE(c[1] == controlSet);\n}\n\n\nTEST_F(CoverGTest, testAddToSubset2) {\n\tcount n = 10;\n\tCover c(n);\n\tindex sid = c.toSingleton(0);\n\tc.addToSubset(sid,5);\n\tEXPECT_TRUE(c.inSameSubset(0,5));\n}\n\nTEST_F(CoverGTest, testMoveToSubset) {\n\tcount n = 10;\n\tCover c(n);\n\tc.allToSingletons();\n\tc.addToSubset(5,0);\n\tc.addToSubset(2,0);\n\tc.addToSubset(3,0);\n\tc.addToSubset(4,0);\n\tc.addToSubset(0,1);\n\tc.moveToSubset(8,0);\n\tstd::set<index> controlSet = {8};\n\tEXPECT_EQ(c[0],controlSet);\n}\n\nTEST_F(CoverGTest, testSubsetSizesWithUnassignedElements) {\n\tcount n = 10;\n\tCover c(n);\n\tfor (index i = 0; i < n; i+=2) {\n\t\tc.toSingleton(i);\n\t}\n\tstd::vector<index> controlSet = {1,1,1,1,1};\n\tEXPECT_EQ(c.subsetSizes(),controlSet);\n}\n\nTEST_F(CoverGTest, testSubsetSizesTrivial) {\n\tcount n = 10;\n\tCover c(n);\n\tfor (index i = 0; i < n; i++) {\n\t\tc.toSingleton(i);\n\t}\n\tstd::vector<index> controlSet = {1,1,1,1,1,1,1,1,1,1};\n\tEXPECT_EQ(c.subsetSizes(),controlSet);\n}\n\nTEST_F(CoverGTest, testSubsetSizesTrivial2) {\n\tcount n = 10;\n\tCover c(n);\n\tfor (index i = 0; i < n; i+=2) {\n\t\tc.toSingleton(i);\n\t}\n\tfor (index i = 1; i < n; i+=2) {\n\t\tc.addToSubset((i/2)+1,i);\n\t}\n\tstd::vector<index> controlSet = {2,2,2,2,2};\n\tEXPECT_EQ(c.subsetSizes(),controlSet);\n}\n\nTEST_F(CoverGTest, testSubsetSizesAssignedToMultipleSubsets) {\n\tcount n = 10;\n\tCover c(n);\n\tfor (index i = 0; i < n; i++) {\n\t\tc.toSingleton(i);\n\t}\n\tfor (index i = 1; i < n; i+=2) {\n\t\tc.addToSubset(i,i);\n\t}\n\tstd::vector<index> controlSet = {2,1,2,1,2,1,2,1,2,1};\n\tEXPECT_EQ(c.subsetSizes(),controlSet);\n}\n\nTEST_F(CoverGTest, testSubsetSizesAssignedToMultipleSubsets2) {\n\tcount n = 10;\n\tCover c(n);\n\tfor (index i = 0; i < n; i+=2) {\n\t\tindex sid = c.toSingleton(i);\n\t\tc.toSingleton(i+1);\n\t\tc.addToSubset(sid,i+1);\n\t}\n\tfor (index i = 0; i < n; i++) 
{\n\t\tc.addToSubset(i+1,0);\n\t}\n\tstd::vector<index> controlSet = {2,2,3,2,3,2,3,2,3,2};\n\tEXPECT_EQ(c.subsetSizes(),controlSet);\n}\n\nTEST_F(CoverGTest, testSubsetSizeMapMultipleSets) {\n\tcount n = 10;\n\tCover c(n);\n\tfor (index i = 0; i < n; i++) {\n\t\tc.toSingleton(i);\n\t}\n\tfor (index i = 1; i < n; i+=2) {\n\t\tc.addToSubset(i,i);\n\t}\n\tstd::map<index,count> controlMap;\n\tcontrolMap[1] = 2;\n\tcontrolMap[2] = 1;\n\tcontrolMap[3] = 2;\n\tcontrolMap[4] = 1;\n\tcontrolMap[5] = 2;\n\tcontrolMap[6] = 1;\n\tcontrolMap[7] = 2;\n\tcontrolMap[8] = 1;\n\tcontrolMap[9] = 2;\n\tcontrolMap[10] = 1;\n\tEXPECT_EQ(c.subsetSizeMap(),controlMap);\n}\n\nTEST_F(CoverGTest, testMergeSubsetsAndGetMembers) {\n\tcount n = 10;\n\tCover c(n);\n\tfor (index i = 0; i < n; i+=2) {\n\t\tindex sid = c.toSingleton(i);\n\t\tc.toSingleton(i+1);\n\t\tc.addToSubset(sid,i+1);\n\t}\n\tfor (index i = 0; i < n; i++) {\n\t\tc.addToSubset(i+1,0);\n\t}\n\tc.mergeSubsets(1,3);\n\tc.mergeSubsets(5,11);\n\tauto c11 = c.getMembers(11);\n\tstd::vector<index> controlSetSizes = {2,2,2,3,2,3,2,6};\n\t// remaining subset IDs 2,4,6,7,8,9,10,12\n\t// their sizes 2,2,2,3,2,3,2,6\n\tstd::set<index> controlSetMembers = {0,1,2,3,4,5};\n\tEXPECT_EQ(controlSetSizes,c.subsetSizes()); // check if subsets sizes are correct\n\tauto c12 = c.getMembers(12);\n\tEXPECT_EQ(c12,controlSetMembers); // check if elements of merged subsets are correct\n}\n\nTEST_F(CoverGTest, testNumberOfSubsets) {\n\tcount n = 10;\n\tCover c(n);\n\tfor (index i = 0; i < n; i+=2) {\n\t\tindex sid = c.toSingleton(i);\n\t\tc.toSingleton(i+1);\n\t\tc.addToSubset(sid,i+1);\n\t}\n\tfor (index i = 0; i < n; i++) {\n\t\tc.addToSubset(i+1,0);\n\t}\n\tEXPECT_EQ(n,c.numberOfSubsets());\n\tc.mergeSubsets(1,2);\n\tc.mergeSubsets(3,11);\n\tEXPECT_EQ(8u, c.numberOfSubsets());\n}\n\n\nTEST_F(CoverGTest, testSubsetsOf) {\n\tcount n = 10;\n\tCover c(n);\n\tfor (index i = 0; i < n; i+=2) {\n\t\tindex sid = c.toSingleton(i);\n\t\tc.toSingleton(i+1);\n\t\tc.addToSubset(sid,i+1);\n\t}\n\tfor (index i = 0; i < n; i++) {\n\t\tc.addToSubset(i+1,0);\n\t}\n\tauto subsetsOf0 = c.subsetsOf(0);\n\tstd::set<index> controlSet0 = {1,2,3,4,5,6,7,8,9,10};\n\tauto subsetsOf3 = c.subsetsOf(3);\n\tstd::set<index> controlSet3 = {3,4};\n\tEXPECT_EQ(controlSet0,subsetsOf0);\n\tEXPECT_EQ(controlSet3,subsetsOf3);\n\tc.mergeSubsets(1,3);\n\tc.mergeSubsets(5,11);\n\tsubsetsOf0 = c.subsetsOf(0);\n\tcontrolSet0 = {2,4,6,7,8,9,10,12};\n\tEXPECT_EQ(controlSet0,subsetsOf0);\n}\n\nTEST_F(CoverGTest, testInSameSubset) {\n\tcount n = 10;\n\tCover c(n);\n\tfor (index i = 0; i < n; i+=2) {\n\t\tindex sid = c.toSingleton(i);\n\t\tc.toSingleton(i+1);\n\t\tc.addToSubset(sid,i+1);\n\t}\n\tEXPECT_TRUE(c.inSameSubset(0,1));\n\tEXPECT_FALSE(c.inSameSubset(0,2));\n\tEXPECT_FALSE(c.inSameSubset(1,5));\n\tc.mergeSubsets(1,3);\n\tEXPECT_TRUE(c.inSameSubset(0,1));\n\tEXPECT_TRUE(c.inSameSubset(0,2));\n\tEXPECT_FALSE(c.inSameSubset(1,5));\n\tc.mergeSubsets(5,11);\n\tEXPECT_TRUE(c.inSameSubset(0,1));\n\tEXPECT_TRUE(c.inSameSubset(0,2));\n\tEXPECT_TRUE(c.inSameSubset(1,5));\n}\n\n\n} /* namespace NetworKit */\n\n#endif /*NOGTEST */\n" }, { "alpha_fraction": 0.6338028311729431, "alphanum_fraction": 0.6619718074798584, "avg_line_length": 13.199999809265137, "blob_id": "7e0755d54ea7a00a0067e6eb17cdae5079c52474", "content_id": "cee67902f46f15aabe54adbde8ba99ca613c387f", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 284, "license_type": "permissive", "max_line_length": 
43, "num_lines": 20, "path": "/networkit/cpp/dynamics/test/DynamicsGTest.h", "repo_name": "networkitproject/networkit-mirror", "src_encoding": "UTF-8", "text": "/*\n * DynamicsGTest.h\n *\n * Created on: 24.12.2013\n * Author: cls\n */\n\n#ifndef DYNAMICSGTEST_H_\n#define DYNAMICSGTEST_H_\n\n#include <gtest/gtest.h>\n\nnamespace NetworKit {\n\nclass DynamicsGTest: public testing::Test {\n};\n\n} /* namespace NetworKit */\n\n#endif /* DYNAMICSGTEST_H_ */\n" }, { "alpha_fraction": 0.6685552597045898, "alphanum_fraction": 0.7025495767593384, "avg_line_length": 31.090909957885742, "blob_id": "2a6582c4bca555b479ec68c59a51c29def34c653", "content_id": "d5b99248b7f5c5be77273c6b8179bfc849fe2106", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 353, "license_type": "permissive", "max_line_length": 105, "num_lines": 11, "path": "/Doc/doc/datasets.rst", "repo_name": "networkitproject/networkit-mirror", "src_encoding": "UTF-8", "text": "=========\nData sets\n=========\n\nOn this page, we provide links to data sets which can be used with NetworKit.\n\n- `10th DIMACS Implementation Challenge (2010-2012) <http://www.cc.gatech.edu/dimacs10/downloads.shtml>`_\n\n- `Stanford Network Analysis Project <http://snap.stanford.edu/>`_\n\n- `The Koblenz Network Collection <http://konect.uni-koblenz.de/>`_\n" }, { "alpha_fraction": 0.6998961567878723, "alphanum_fraction": 0.7011818289756775, "avg_line_length": 36.58921813964844, "blob_id": "9d490f8d8b62867fd354ded19be49bd4d2154be7", "content_id": "46cfa9bd22061b227446b11eacb1b5e57662ee57", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 20227, "license_type": "permissive", "max_line_length": 116, "num_lines": 538, "path": "/Doc/doc/api/DevGuide.rst", "repo_name": "networkitproject/networkit-mirror", "src_encoding": "UTF-8", "text": ".. _devGuide:\n\nNetworKit Development Guide\n===========================\n\nThis text is meant to provide some guidelines for the ongoing\ndevelopment of the project. It is meant for core developers as well as\noccasional contributors.\n\nThe following text assumes some basic familiarity with the Mercurial\nversion control software. It is not a Mercurial tutorial, because you\nwill find a good one at `hginit.com <http://hginit.com>`__. Rather, it\nexplains concepts and workflows for the development of this project.\n\nIf you want to contribute, you should consider the `technical\nreport <https://arxiv.org/pdf/1403.3005.pdf>`__ on NetworKit to get\nfamiliar with the architecture.\n\nIf you use NetworKit in your research publications, please cite the\nmentioned technical report or the specific algorithm. A list of\npublications is available on the `website <TODO:%20add%20link>`__.\n\nHow to contribute\n-----------------\n\nReport bugs\n~~~~~~~~~~~\n\nFor the time being, bugs should be reported by sending a report to the\n`mailing\nlist <https://lists.ira.uni-karlsruhe.de/mailman/listinfo/networkit>`__.\nPlease provide a minimal example so that others can reproduce that bug.\n\nFork NetworKit\n~~~~~~~~~~~~~~\n\nFeel free to fork NetworKit on algohub and start contributing by fixing\nbugs or taking care of the issues at\n`kanboard.iti.kit.edu <https://kanboard.iti.kit.edu>`__. New and missing\nfeatures are welcome as well.\n\nRepositories\n------------\n\nThe NetworKit main development repository is at\nhttp://algohub.iti.kit.edu/parco/NetworKit/NetworKit. 
Access to this\nrepository is provided on request.\n\n`algohub.iti.kit.edu <http://algohub.iti.kit.edu>`__ (an installation of\n`RhodeCode <https://rhodecode.com/>`__) makes it easy to create and\nmanage forks. Forking is distinct from branching and creates a new\nrepository with a new address, its own access control etc. A fork\ncontains all branches of its parent.\n\nProject Tracker (Kanboard)\n~~~~~~~~~~~~~~~~~~~~~~~~~~\n\nAt `kanboard.iti.kit.edu <https://kanboard.iti.kit.edu>`__ we maintain a\nproject task tracker to coordinate development and releases. An account\nis given on request; please ask on the mailing list. Tasks are moved\nfrom left to right through the columns:\n\n- ``Backlog``: improvement ideas, some day maybe, \"nice to have\"\n- ``ToDo``: scheduled improvements\n- ``Work in progress``\n- ``To Review``: requesting peer review\n- ``Ready for Release``\n\nThere is the possibility to create \"swim lanes\" for different releases.\n\nBranches\n--------\n\nCurrently, the two most important branches of NetworKit are ``Dev`` and\n``default``.\n\n::\n\n ________ Dev\n ____/________ default\n\nAs the name says, ``default`` is the branch which you are on if you do\nnot switch. It is therefore the release branch, containing code which is\nready for use. Unless you are a core developer preparing a release or\nfixing an urgent bug, you do not make changes to ``default``.\n\n``Dev`` is the development branch and most of the development of new\nfeatures happens in this branch. This is also where new releases are\nbeing prepared. When pushing into this branch, think about whether your\ncode is ready for the core development team to work with and will be\nsuitable for a release in the foreseeable future.\n\nIt can be appropriate to create additional branches for projects,\nfeatures, developer teams etc. Creation of branches should be\ncoordinated with the core development team. For this purpose, post to\nthe `mailing\nlist <https://lists.ira.uni-karlsruhe.de/mailman/listinfo/networkit>`__.\n\nTags\n----\n\nA tag is nothing more than a “symbolic name” for a revision. In\nNetworKit tags are used to mark release versions in the ``default``\nbranch, with a ``MAJOR.MINOR`` version name scheme.\n\nWorkflows\n---------\n\nThis section describes how to work with branches and forks in different\nscenarios.\n\nUsing NetworKit\n~~~~~~~~~~~~~~~\n\nIf you want to build and use NetworKit and do not plan to contribute\nchanges, simply clone the repository. By default, you will be on the\n``default`` branch, which represents the current release. Follow the\nsetup instructions in the ``Readme``.\n\nCore Development\n~~~~~~~~~~~~~~~~\n\nThis section describes workflows for the core development team.\n\nBugfixes\n^^^^^^^^\n\nBugfixes are changes that should be immediately visible to users of\nNetworKit, such as solutions for urgent errors or improvements of the\n``Readme`` document. In this case, make the changes in the ``default``\nbranch and commit. Then switch to the ``Dev`` branch and merge the\n``default`` branch back into ``Dev``.\n\n::\n\n _______________ Dev\n / / < merge default into Dev\n ____/____________/____ default\n ^ bugfix\n\nExample:\n\n::\n\n hg up default\n ...\n hg com -m \"fixed bug xyz\"\n hg up Dev\n hg merge default\n hg com -m \"backmerge bugfix xyz\"\n\nReleasing New Features\n^^^^^^^^^^^^^^^^^^^^^^\n\nWhen new features should be released, the ``Dev`` branch is merged into\nthe ``default`` branch. Additional testing and cleanup is performed\nbefore that happens. 
The new major or minor release is then tagged with\na version number.\n\n::\n\n ______________________________________________________ Dev\n / ^ new feature prepare release ^ \\ < merge Dev into default\n ____/________________________________________\\______________ default\n ^ tag version\n\nExample:\n\n::\n\n hg up Dev\n hg com -m \"ready for release X.Y\"\n hg up default\n hg merge Dev\n hg com -m \"release X.Y\"\n\nMultiple heads in multiple branches\n^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n\nIf remote changes have happened in multiple branches and you pull them,\nthese branches will have multiple heads. Merging now needs to happen for\neach of the affected branches before you can push. Switch to each branch\nand perform a merge as usual. As an alternative to merging, you may try\nthe ``rebase``\n`extension <https://www.mercurial-scm.org/wiki/RebaseExtension>`__.\n\nContributions\n~~~~~~~~~~~~~\n\nUsers of NetworKit are welcome to contribute their modifications. New\nfeatures must be added to the ``Dev`` branch, not the ``default``\nbranch. We recommend the following workflow:\n\n1. create a fork of the main repository\n2. switch to the ``Dev`` branch\n3. make and commit your changes while being on the ``Dev`` branch\n4. send a pull request to the main repository\n\nStudent Projects\n~~~~~~~~~~~~~~~~\n\nStudents with long-term projects like Bachelor's or Master's theses\nshould familiarize themselves with the guidelines and select a\nforking/branching model with their advisor.\n\nBranching Cheat Sheet\n---------------------\n\n- list all available branches: ``hg branches``\n- check on which branch you are: ``hg branch``\n- see heads (most recent commits) of all branches: ``hg head``\n- see tip (most recent commits) of the branch you are currently working\n on: ``hg tip``\n- switch to a specific branch: ``hg update <branchname>``\n- start a new branch: ``hg branch <branchname>``\n- merge ``branchY`` into ``branchX``: ``hg update branchX``, then\n ``hg merge branchY``\n\nConventions\n-----------\n\nThe following general conventions apply to all NetworKit developers.\n\nVersioning\n~~~~~~~~~~\n\n- Before you commit, make sure your code compiles and run the unit\n tests. Never push code which breaks the build for others.\n- Commit regularly and often to your local repository.\n- Use meaningful commit messages.\n- Get the newest changes from the repository regularly and merge them\n into your local repository.\n- Make sure that you merged correctly and did not break other people's\n work.\n- Push correct code early if possible. Merging is easier if all\n developers are up to date.\n- Never ``push --force`` to the main repository.\n\n.. _devGuide-unitTests:\n\nUnit Tests and Testing\n----------------------\n\nEvery new feature must be covered by a unit test. Omitting unit tests\nmakes it very likely that your feature will break silently as the\nproject develops, leading to unnecessary work in tracing back the\nsource of the error.\n\nUnit tests for the C++ part of NetworKit are based on the ``googletest``\nlibrary. For more information read the `googletest\nprimer <http://code.google.com/p/googletest/wiki/Primer>`__. The Python\ntest framework currently relies on ``nose`` to collect the tests.\n\n- Each source folder contains a ``test`` folder with ``googletest``\n classes. Create the unit tests for each feature in the appropriate\n ``test/*GTest`` class by adding a ``TEST_F`` function.\n- Prefix standard unit tests with ``test`` and experimental feature\n tests with ``try``. 
A ``test*`` must pass when pushed to the main\n repository, a ``try*`` is allowed to fail.\n- Keep the running time of test functions to the minimum needed for\n testing functionality. Testing should be fast; long-running unit\n tests look like infinite loops.\n- If the unit test requires a data set, add the file to the ``input/``\n folder. Only small data sets (a few kilobytes maximum) are acceptable\n in the repository.\n- Any output files produced by unit tests must be written to the\n ``output/`` folder.\n\nTo build and run the tests you need the `gtest\nlibrary <https://code.google.com/p/googletest/>`__. Assuming gtest is\nsuccessfully installed and you have added the paths to your build.conf, the\nunit tests should be compiled with:\n\n::\n\n scons --optimize=Dbg --target=Tests\n\nTo verify that the code was built correctly, run all unit tests with\n\n::\n\n ./NetworKit-Tests-Dbg --tests/-t\n\nPerformance tests will be selected with\n\n::\n\n ./NetworKit-Tests-Dbg --benchmarks/-b\n\nwhile experimental tests are called with\n\n::\n\n ./NetworKit-Tests-Dbg --trials/-e\n\nTo run only specific unit tests, you can also add a filter expression,\ne.g.:\n\n::\n\n ./NetworKit-Tests-Dbg --gtest_filter=*PartitionGTest*/-f*PartitionGTest*\n\ninitiates unit tests only for the Partition data structure.\n\nFor the **Python** unit tests, run:\n\n::\n\n python3 setup.py test [--cpp-tests/-c]\n\nThis command will compile the \\_NetworKit extension and then run all\ntest cases on the Python layer. If you append ``--cpp-tests/-c``, the\nunit tests of the C++ side will be compiled and run before the Python\ntest cases.\n\nTest-driven development\n~~~~~~~~~~~~~~~~~~~~~~~\n\nIf you implement a new feature for NetworKit, we encourage you to adapt\nyour development process to test-driven development. This means that you\nstart with one or ideally several test cases for your feature and then\nwrite the feature for the test case(s). If your feature is mostly\nimplemented in C++, you should write your test cases there. If you\nexpose your feature to Python, you should also write a test case for the\nextension module on the Python layer. The same applies for features in\nPython.\n\n
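To make this concrete, here is a sketch of such a test-first entry point in ``googletest`` style; the generator class and its interface are purely illustrative and not part of the code base:\n\n::\n\n    TEST_F(GeneratorsGTest, testMyNewGenerator) {\n        MyNewGenerator gen(100); // hypothetical class under test\n        Graph G = gen.generate();\n        EXPECT_EQ(100u, G.numberOfNodes());\n        EXPECT_TRUE(G.numberOfEdges() > 0);\n    }\n\nWrite the test first, watch it fail, then implement the feature until it passes.\n\n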
Code Style\n~~~~~~~~~~\n\n- Compiler warnings are likely to turn into future errors. Try to fix\n them as soon as they appear.\n- Read some code to get used to the code style and try to adopt it.\n- Document classes, methods and attributes in Doxygen style.\n- Use the ``count`` and ``index`` integer types for non-negative\n integer quantities and indices.\n- In most cases, objects are passed by reference. New objects are\n stack-allocated and returned by value. Avoid pointers and ``new``\n where possible.\n- Use the ``override`` keyword to indicate that a method overrides a\n virtual method in the superclass.\n- In Python, indent using tabs, not spaces.\n\nAlgorithm interface and class hierarchy\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\nWe use the possibilities provided through inheritance to generalize the\ncommon behaviour of algorithm implementations:\n\n- Data and parameters should be passed in the constructor.\n- A void run()-method that takes no parameter triggers the execution.\n- To retrieve the result(s), getter-functions() may be defined.\n\nThe ``Algorithm`` base class also defines a few other functions to\nquery whether the algorithm can be run in parallel or to retrieve a\nstring representation.\n\nThere may be more levels in the class hierarchy between an algorithm\nimplementation and the base class, e.g. a single-source shortest-path\nclass ``SSSP`` that generalizes the behaviour of BFS and Dijkstra\nimplementations or the ``Centrality`` base class. When implementing new\nfeatures or algorithms, make sure to adapt to the existing class\nhierarchies. At the very least, inherit from the ``Algorithm``\nbase class. Changes to existing interfaces or suggestions for new\ninterfaces should be discussed through the `mailing\nlist <[email protected]>`__.\n\n
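A minimal sketch of such a subclass follows; the class name and the computed quantity are made up for illustration, and the exact bookkeeping members (such as ``hasRun``) should be checked against the current ``Algorithm`` header:\n\n::\n\n    class MyScore : public Algorithm {\n        const Graph& G; // input data, passed in the constructor\n        double result;\n    public:\n        MyScore(const Graph& G) : G(G), result(0.0) {}\n\n        void run() override {\n            // example computation: average degree of the graph\n            result = 2.0 * G.numberOfEdges() / (double) G.numberOfNodes();\n            hasRun = true; // mark the computation as finished\n        }\n\n        double getResult() const { return result; } // getter for the result\n    };\n\n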
Exposing C++ Code to Python\n---------------------------\n\nAssuming the unit tests for the new feature you implemented are correct\nand successful, you need to make your features available to Python in\norder to use them. NetworKit uses Cython to bridge C++ and Python. All of\nthis bridge code is contained in the Cython code file\n``src/python/_Networkit.pyx``. The content is automatically translated\ninto C++ and then compiled to a Python extension module.\n\nCython syntax is a superset of Python that knows about static type\ndeclarations and other things from the C/C++ world. The best way to\nget used to it is to work through examples. Take the most common case of\nexposing a C++ class as a Python class. Consider the following example\nthat exposes the class ``NetworKit::Dijkstra``:\n\n::\n\n cdef extern from \"cpp/graph/Dijkstra.h\":\n cdef cppclass _Dijkstra \"NetworKit::Dijkstra\"(_SSSP):\n _Dijkstra(_Graph G, node source, bool storePaths, bool storeStack, node target) except +\n\nThe code above exposes the C++ class definition to Cython - but not yet\nto Python. First of all, Cython needs to know which C++ declarations to\nuse, so the first line directs Cython to place an ``#include``\nstatement. The second line defines a class that is only accessible in\nthe Cython world. Our convention is that the name of the new class is\nthe name of the referenced C++ class with a prepended underscore to\navoid namespace conflicts. What follows is the \"real\" C++ name of the\nclass. After that, the declarations of the methods you want to make\navailable for Python are needed. The ``except +`` statement is necessary\nfor exceptions thrown by the C++ code to be rethrown as Python\nexceptions rather than causing a crash. Also, take care that the Cython\ndeclarations match the declarations from the referenced header file.\n\n::\n\n cdef extern from \"cpp/graph/SSSP.h\":\n cdef cppclass _SSSP \"NetworKit::SSSP\"(_Algorithm):\n _SSSP(_Graph G, node source, bool storePaths, bool storeStack, node target) except +\n vector[edgeweight] getDistances(bool moveOut) except +\n [...]\n\n cdef class SSSP(Algorithm):\n \"\"\" Base class for single source shortest path algorithms. \"\"\"\n\n cdef Graph _G\n\n def __init__(self, *args, **namedargs):\n if type(self) == SSSP:\n raise RuntimeError(\"Error, you may not use SSSP directly, use a sub-class instead\")\n\n def __dealloc__(self):\n self._G = None # just to be sure the graph is deleted\n\n def getDistances(self, moveOut=True):\n \"\"\"\n Returns a vector of weighted distances from the source node, i.e. the\n length of the shortest path from the source node to any other node.\n\n Returns\n -------\n vector\n The weighted distances from the source node to any other node in the graph.\n \"\"\"\n return (<_SSSP*>(self._this)).getDistances(moveOut)\n [...]\n\nWe mirror the class hierarchy of the C++ world also in Cython and\nPython. This also saves some boilerplate wrapping code, as the functions\nshared by Dijkstra and BFS only need to be wrapped through SSSP.\n\n::\n\n    cdef class Dijkstra(SSSP):\n        \"\"\" Dijkstra's SSSP algorithm.\n\n        Returns list of weighted distances from node source, i.e. the length of the shortest path from source to\n        any other node.\n\n        Dijkstra(G, source, [storePaths], [storeStack], target)\n\n        Creates Dijkstra for `G` and source node `source`.\n\n        Parameters\n        ----------\n        G : Graph\n            The graph.\n        source : node\n            The source node.\n        storePaths : bool\n            store paths and number of paths?\n        storeStack : bool\n            maintain a stack of nodes in order of decreasing distance?\n        target : node\n            target node. Search ends when target node is reached. t is set to None by default.\n        \"\"\"\n        def __cinit__(self, Graph G, source, storePaths=True, storeStack=False, node target=none):\n            self._G = G\n            self._this = new _Dijkstra(G._this, source, storePaths, storeStack, target)\n\nFor the class to be accessible from the Python world, you need to define\na Python wrapper class which delegates method calls to the native class.\nThe Python class variable ``_this`` holds a pointer to an instance of\nthe native class. Please note that the parameters are now Python\nobjects. Method wrappers take these Python objects as parameters and\npass the internal native objects to the actual C++ method call. The\nconstructor of such a wrapper class is called ``__cinit__``, and it\ncreates an instance of the native object.\n\nThe docstrings between the triple quotation marks can be accessed through\nPython's ``help(...)`` function and are the main documentation of\nNetworKit. Always provide at least a short and precise docstring so the\nuser can get an idea of the functionality of the class. For C++ types\navailable to Python and further examples, look through the\n``_NetworKit.pyx`` file. The whole process certainly has some\nintricacies, e.g. some tricks are needed to avoid memory waste when\npassing around large objects such as graphs. When in doubt, look at\nexamples of similar classes already exposed. Listen to the Cython\ncompiler: coming from C++, its error messages are in general pleasantly\nhuman-readable.\n\nMake algorithms interruptible with CTRL+C/SIGINT\n------------------------------------------------\n\nWhen an algorithm takes too long to produce a result, it can be\ninterrupted with a SIGINT signal triggered by CTRL+C. When triggering\nfrom the Python shell while the runtime is in the C++ domain, execution\nis aborted and even terminates the Python shell. Therefore, we\nimplemented a signal handler infrastructure in C++ that raises a special\nexception instead of aborting. When implementing an algorithm, it is\nstrongly encouraged to integrate the signal handler into the\nimplementation. There are many examples of how to use it, e.g.\n``networkit/cpp/centrality/Betweenness.cpp`` or\n``networkit/cpp/community/PartitionFragmentation.cpp``.\n\n
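Inside a ``run()`` method, the pattern used there looks roughly like the following sketch, reusing the hypothetical ``MyScore`` class from above (check the referenced implementations for the exact interface):\n\n::\n\n    #include \"../auxiliary/SignalHandling.h\"\n\n    void MyScore::run() {\n        Aux::SignalHandler handler;\n        G.forNodes([&](node u) {\n            handler.assureRunning(); // throws a special exception once SIGINT was received\n            // ... expensive per-node work ...\n        });\n        hasRun = true;\n    }\n\n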
Contact\n-------\n\nTo discuss important changes to NetworKit, use the `e-mail\nlist <https://lists.ira.uni-karlsruhe.de/mailman/listinfo/networkit>`__\n(``[email protected]``).\n\nBuilding the documentation\n--------------------------\n\nThe class documentation and the website can be automatically generated\nwith sphinx. You will need the following software to generate the\ndocumentation and website:\n\n- `Sphinx <http://www.sphinx-doc.org>`__ (e.g. via\n ``pip3 install sphinx``)\n- `Pandoc <http://pandoc.org>`__\n- `Doxygen <http://www.stack.nl/~dimitri/doxygen/>`__\n\nAfter you have installed the above-mentioned software, you can build the\nclass documentation by calling ``./make_doc.sh`` in the folder\n``Doc/doc``. This will generate the class documentation for C++ and\nPython in ``Doc/Documentation``. Similarly, you can call\n``./make_www.sh`` to build the website. After the build has finished, you\nwill find the generated website in ``Doc/Website/``.\n\nFurther Reading\n---------------\n\n- `hginit.com <http://hginit.com>`__\n- `Working with named\n branches <http://humblecoder.co.uk/blog/2010/02/24/working-with-named-branches-in-mercurial/>`__\n- `Managing releases and branchy\n development <http://hgbook.red-bean.com/read/managing-releases-and-branchy-development.html>`__\n- `Cython Documentation <http://docs.cython.org/index.html>`__\n" }, { "alpha_fraction": 0.6666666865348816, "alphanum_fraction": 0.6773761510848999, "avg_line_length": 18.219512939453125, "blob_id": "170ebab407d166436e203a7ec33237bcd36bb0fb", "content_id": "0f5e7b90e213a35211c8f4d975ce29d7ec4e170a", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 747, "license_type": "permissive", "max_line_length": 97, "num_lines": 41, "path": "/networkit/cpp/numerics/LAMG/Level/Level.cpp", "repo_name": "networkitproject/networkit-mirror", "src_encoding": "UTF-8", "text": "/*\n * Level.cpp\n *\n * Created on: 09.01.2015\n * Author: Michael\n */\n\n#include \"Level.h\"\n#include \"../LAMGSettings.h\"\n\nnamespace NetworKit {\n\nLevel::Level(LevelType type) : type(type) {\n}\n\nLevel::Level(LevelType type, const CSRMatrix &A) : type(type), A(A) {\n}\n\nconst CSRMatrix& Level::getLaplacian() const {\n\treturn A;\n}\n\ncount Level::getNumberOfNodes() const {\n\treturn A.numberOfRows();\n}\n\nvoid Level::restrict(const Vector &bf, Vector &bc) const {\n}\n\nvoid Level::restrict(const Vector &bf, Vector &bc, std::vector<Vector> &bStages) const {\n}\n\nvoid Level::interpolate(const Vector &xc, Vector &xf) const {\n}\n\nvoid Level::interpolate(const Vector &xc, Vector &xf, const std::vector<Vector> &bStages) const {\n}\n\n\n\n} /* namespace NetworKit */\n" }, { "alpha_fraction": 0.6983201503753662, "alphanum_fraction": 0.7041518092155457, "avg_line_length": 34.35076904296875, "blob_id": "f23f0e656d0977279550c1c7dc27475276fb8548", "content_id": "8791bd4dd25cd1f09f8403c20dd099b991c1fb23", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 11489, "license_type": "permissive", "max_line_length": 281, "num_lines": 325, "path": "/setup.py", "repo_name": "networkitproject/networkit-mirror", "src_encoding": "UTF-8", "text": "import sys\n\n##################################\n# check Python version\n##################################\n\nif sys.version_info.major < 3:\n\tprint(\"ERROR: NetworKit requires Python 3.\")\n\tsys.exit(1)\n\nimport version\nfrom setup_util import *\nif \"setuptools\" not in sys.modules:\n\tfrom ez_setup import use_setuptools\n\t# in case setuptools is not installed\n\tuse_setuptools()\n\nfrom setuptools import setup, Extension, find_packages\nfrom setuptools.command.test import test as TestCommand\nfrom setuptools.command.build_ext import build_ext as SetuptoolsBuildExtCmd\nfrom setuptools.command.install import install as InstallCmd\n# from setuptools.command.clean import clean as CleanCmd\nimport unittest\n\nabort_installation = False\nerrorMessages = 
[]\nwarnMessages = []\ntry:\n\timport Cython\n\tfrom Cython.Build import cythonize\n\tfrom Cython.Distutils import build_ext as CythonBuildExtCmd\n\tfrom distutils.version import LooseVersion\n\tif LooseVersion(Cython.__version__) >= LooseVersion('0.21'):\n\t\tcython_available = True\n\telse:\n\t\tcython_available = False\n\t\t#print(\"Cython version too old, please update\")\nexcept:\n\t# import so that the deriving class can still be there\n\tfrom setuptools.command.build_ext import build_ext as CythonBuildExtCmd\n\tabort_installation = False\n\tcython_available = False\n\t# errorMessages.append(\"ERROR: Cython not installed. Please install Cython and rerun\")\n\nimport multiprocessing\nimport os\nimport shutil\n\nimport subprocess\n\nfrom argparse import ArgumentParser\n\n##################################\n# check whether SCons is available\n##################################\nscons_available = None\ntry:\n\tif shutil.which(\"scons\") is None:\n\t\t# errorMessages.append(\"ERROR: Build system SCons is not installed. Please install and rerun\")\n\t\t# abort_installation = True\n\t\tscons_available = False\n\telse:\n\t\tscons_available = True\nexcept:\n\tprint(\"WARNING: unable to check whether build system SCons is installed\")\n\tscons_available = False\n\n#\nif sys.platform == 'Windows' and not scons_available:\n\tabort_installation = True\n\terrorMessages.append(\"ERROR: Build system SCons is not installed. Please install and rerun\")\n\n\n# compiler candidates\n# this list serves as a fallback when neither $CXX is set nor build.conf exists\ncandidates = [\"g++\", \"g++-6.1\", \"g++-6\", \"g++-5.3\", \"g++-5.2\", \"g++-5.1\", \"g++-5\", \"g++-4.9\", \"g++-4.8\", \"clang++\", \"clang++-3.8\", \"clang++-3.7\"]\nstdflag = None\n\n#######################################\n# read the build.conf IFF it exists\n#######################################\nif os.path.isfile(\"build.conf\"):\n\timport configparser\n\tconfPath = \"build.conf\"\n\n\tconf = configparser.ConfigParser()\n\tconf.read([confPath]) # read the configuration file\n\n\tcppComp = conf.get(\"compiler\", \"cpp\")\n\tif not cppComp in candidates:\n\t\t# insert specified compiler from build.conf at the beginning\n\t\tcandidates.insert(0, cppComp)\n\telse:\n\t\t# move candidate to the beginning\n\t\tcandidates.insert(0, candidates.pop(candidates.index(cppComp)))\n\n\t## C++14 support\n\tif stdflag is None:\n\t\ttry:\n\t\t\tstdflag = conf.get(\"compiler\", \"std14\")\n\t\texcept:\n\t\t\tpass\n\n\n#######################################\n# determine and set compiler or exit if there is no suitable compiler\n#######################################\n# temporarily disable compiler check on windows.\nif not sys.platform == 'Windows':\n\t# check CXX environment variable for default C++ compiler\n\ttry:\n\t\tdefault_candidate = os.environ[\"CXX\"]\n\t\tif not default_candidate in candidates:\n\t\t\t# insert specified compiler from build.conf at the beginning\n\t\t\tcandidates.insert(0, default_candidate)\n\t\telse:\n\t\t\t# move candidate to the beginning\n\t\t\tcandidates.insert(0, candidates.pop(candidates.index(default_candidate)))\n\texcept:\n\t\tpass\n\t# check if the specified compiler is suitable\n\tif stdflag is None or len(stdflag) == 0:\n\t\tcppcompiler, stdflag = determineCompiler(candidates, [\"c++14\",\"c++11\"])\n\telse:\n\t\tcppcompiler,_ = determineCompiler(candidates, [stdflag])\n\tif cppcompiler is not None:\n\t\tos.environ[\"CC\"] = cppcompiler\n\t\tos.environ[\"CXX\"] = 
cppcompiler\n\telse:\n\t\terrorMessages.append(\"ERROR: Test compilation with the following binary names was unsuccessful: {}. Make sure you have either g++ (>= 4.8) or clang++ (>= 3.7) properly installed\".format(\", \".join(candidates)))\n\t\tabort_installation = True\n\n# early abort installation in case the compiler requirements aren't satisfied\nif abort_installation:\n\tfor msg in errorMessages:\n\t\tprint(msg)\n\texit(1)\n\n\n################################################\n# get the optional arguments for the compilation\n################################################\nparser = ArgumentParser()\nparser.add_argument(\"-j\", \"--jobs\", dest=\"jobs\", help=\"specify number of jobs\")\nparser.add_argument(\"-o\", \"--optimize\", dest=\"optimize\", help=\"specify build type: Opt=optimize, Dbg=debug, Pro=profiling\")\nparser.add_argument(\"-c\", \"--with-cpp-tests\", dest=\"cpptests\", help=\"Also compile and run the C++ unit tests\",action='store_true')\n(options,args) = parser.parse_known_args()\n\n# set optional arguments to parsed ones or the default ones\nif options.jobs is not None:\n\tjobs = options.jobs\nelse:\n\tjobs = multiprocessing.cpu_count()\nif options.optimize is not None:\n\toptimize = options.optimize\nelse:\n\toptimize = \"Opt\"\n\n# make sure sys.argv is correct for setuptools\nsys.argv = [__file__] + args\n\n\n# this defintion probably has to stand here...\ndef build_NetworKit():\n\tif scons_available:\n\t\tcomp_cmd = [\"scons\", \"--optimize={0}\".format(optimize), \"--target=Core\", \"-j{0}\".format(jobs)]\n\t\t# scons is available, now check if the user has created a build.conf\n\t\tif not os.path.isfile(\"build.conf\"):\n\t\t\t# we assume, we're in a clone of the repository or in an archived copy of the repository and the user/developer has NOT created a build.conf\n\t\t\t# and therefore needs the information about the compiler\n\t\t\tcomp_cmd.append(\"--compiler={0}\".format(cppcompiler))\n\t\t\tcomp_cmd.append(\"--std={0}\".format(stdflag))\n\t\tprint(\"initializing NetworKit compilation with: {0}\".format(\" \".join(comp_cmd)))\n\t\tif not subprocess.call(comp_cmd) == 0:\n\t\t\tprint(\"scons returned an error, exiting setup.py\")\n\t\t\texit(1)\n\telse:\n\t\tfrom mbe import MinimalBuildEnvironment\n\t\t# minimal builder as fallback for scons\n\t\tdef_compile_flags = [\"-c\", \"-std={}\".format(stdflag), \"-Wall\", \"-fmessage-length=0\", \"-fPIC\", \"-fopenmp\"]\n\t\trelease_compile_flags = [\"-O3\", \"-DNDEBUG\", \"-DLOG_LEVEL=LOG_LEVEL_INFO\"]\n\t\tbuilder = MinimalBuildEnvironment(def_compile_flags,\"\",release_compile_flags,\"\",\"Opt\", cppcompiler, \"networkit/cpp\")\n\t\tbuilder.compile(\"Core\")\n\n# this defintion probably has to stand here...\ndef additional_clean():\n\tclean_cmd = [\"scons\", \"--optimize={0}\".format(optimize), \"--target=Core\", \"-c\"]\n\tsubprocess.call(clean_cmd)\n\tif cython_available and os.path.isfile(\"networkit/_NetworKit.cpp\") and \"clean\" in sys.argv:\n\t\tos.remove(\"networkit/_NetworKit.cpp\")\n\n\n#class CustomCleanCmd(CleanCmd):\n#\tdef initialize_options(self):\n#\t\tCleanCmd.initialize_options(self)\n\n#\tdef finalize_options(self):\n#\t\tCleanCmd.finalize_options(self)\n\n#\tdef run(self):\n#\t\tadditional_clean()\n#\t\tCleanCmd.run(self)\n\nclass CustomStBuildExtCmd(SetuptoolsBuildExtCmd):\n\tdef initialize_options(self):\n\t\tSetuptoolsBuildExtCmd.initialize_options(self)\n\n\tdef finalize_options(self):\n\t\tSetuptoolsBuildExtCmd.finalize_options(self)\n\n\tdef 
run(self):\n\t\tbuild_NetworKit()\n\t\tSetuptoolsBuildExtCmd.run(self)\n\nclass CustomCythonBuildExtCmd(CythonBuildExtCmd):\n\tdef initialize_options(self):\n\t\tCythonBuildExtCmd.initialize_options(self)\n\n\tdef finalize_options(self):\n\t\tCythonBuildExtCmd.finalize_options(self)\n\n\tdef run(self):\n\t\tbuild_NetworKit()\n\t\tif os.path.isfile(\"networkit/_NetworKit.cpp\"):\n\t\t\tos.remove(\"networkit/_NetworKit.cpp\")\n\t\tCythonBuildExtCmd.run(self)\n\nclass MyTestCommand(TestCommand):\n\tdef initialize_options(self):\n\t\tTestCommand.initialize_options(self)\n\n\tdef finalize_options(self):\n\t\tTestCommand.finalize_options(self)\n\n\tdef run(self):\n\t\tif options.cpptests:\n\t\t\toptimize = \"Dbg\"\n\t\t\tcomp_cmd = [\"scons\", \"--optimize={0}\".format(optimize), \"--target=Tests\", \"-j{0}\".format(jobs)]\n\t\t\tprint(\"initializing NetworKit compilation with: {0}\".format(\" \".join(comp_cmd)))\n\t\t\tif not subprocess.call(comp_cmd) == 0:\n\t\t\t\tprint(\"scons returned an error, exiting setup.py\")\n\t\t\t\texit(1)\n\t\t\trun_cpp_cmd = [\"./NetworKit-Tests-{0}\".format(optimize),\"-t\"]\n\t\t\tif subprocess.call(run_cpp_cmd) == 0:\n\t\t\t\tprint(\"C++ unit tests didn't report any errors\")\n\t\t\telse:\n\t\t\t\tprint(\"some C++ unit tests failed, see above\")\n\t\tTestCommand.run(self)\n\nclass CustomInstallCmd(InstallCmd):\n\tdef initialize_options(self):\n\t\tInstallCmd.initialize_options(self)\n\n\tdef finalize_options(self):\n\t\tInstallCmd.finalize_options(self)\n\n\tdef run(self):\n\t\t# run setuptools install command\n\t\tInstallCmd.run(self)\n\t\t# collect and print warnings about external packages used by NetworKit\n\t\twarnMessages = collectExternalPackageStatus()\n\t\tif len(warnMessages) > 0:\n\t\t\tfor msg in warnMessages:\n\t\t\t\tprint(msg)\n\t\t\tprint(\"Save this list and check for each package how to install it on your system.\")\n\n\nsrc = []\n# src can either be _NetworKit.pyx or the cythonized _NetworKit.cpp\ndo_cythonize = False\n# depending on the role in which the setup script is called, it will be determined if _NetworKit.pyx will be cythonized.\nbuild_ext_cmd = None\n# the `build_ext` command depends on the role of the setup script\nif not os.path.exists(\".hg\") and os.path.isfile(\"networkit/_NetworKit.cpp\"):\n\t#print(\"using pre-cythonized _NetworKit.cpp\")\n\t# we assume, were not in the repository, but installing networkit from a zip or via pip\n\tif os.path.isfile(\"MANIFEST.in\"):\n\t\tos.remove(\"MANIFEST.in\")\n\tsrc = [\"networkit/_NetworKit.cpp\"]\n\tbuild_ext_cmd = CustomStBuildExtCmd\nelif os.path.isfile(\"networkit/_NetworKit.pyx\") and cython_available:\n\t#print(\"cythonize _NetworKit.pyx to _NetworKit.cpp\")\n\t# remove _NetworKit.cpp to make room for cython\n\t#if cython_available and os.path.isfile(\"networkit/_NetworKit.cpp\"):\n\t#\tos.remove(\"networkit/_NetworKit.cpp\")\n\tbuild_ext_cmd = CustomCythonBuildExtCmd\n\tsrc = [\"networkit/_NetworKit.pyx\"]\n\tdo_cythonize = True\nelse:\n\tprint(\"ERROR: Some requirements aren't met.\\nIf you try to install/build NetworKit from a clone of the repository or a ZIP archive, make sure you have Cython (version >= 0.21) installed under the __same__ Python 3 version from which you tried to install NetworKit.\\nExiting...\"\"\")\n\texit(1)\n\n# initialize Extension module with the appropriate source file\nmodules = [Extension(\"_NetworKit\",\n\tsrc,\n\tlanguage = \"c++\",\n\textra_compile_args=[\"-fopenmp\", \"-std={}\".format(stdflag), \"-O3\", 
\"-DNOGTEST\"],\n\textra_link_args=[\"-fopenmp\", \"-std={}\".format(stdflag)],\n\tlibraries=[\"NetworKit-Core-{0}\".format(optimize)],\n\tlibrary_dirs=[\"./\"])]\n\nif do_cythonize:\n\tfor e in modules:\n\t\te.cython_directives = {\"embedsignature\" : True}\n\n# initialize the setup with the appropriate commands.\nsetup(\n\tname\t\t\t= version.name,\n\tversion\t\t\t= version.version,\n\tauthor\t\t\t= version.author,\n\tauthor_email\t= version.author_email,\n\turl\t\t\t\t= version.url,\n\tdownload_url\t= version.download_url,\n\tdescription\t\t= version.description,\n\tlong_description= version.long_description,\n\tlicense\t\t\t= version.license,\n\tpackages\t\t= find_packages(),\n\tpackage_data\t= {'networkit.profiling': ['html/*','latex/*','description/*']},\n\tkeywords\t\t= version.keywords,\n\tplatforms\t\t= version.platforms,\n\tclassifiers\t\t= version.classifiers,\n\tcmdclass\t\t= {'build_ext' : build_ext_cmd, 'test' : MyTestCommand, 'install' : CustomInstallCmd}, #'clean' : CustomCleanCmd,\n\ttest_suite\t\t= 'nose.collector',\n\text_modules\t\t= modules,\n\tzip_safe\t\t= False)\n" }, { "alpha_fraction": 0.6043348908424377, "alphanum_fraction": 0.6119847297668457, "avg_line_length": 28.412500381469727, "blob_id": "25632636297678656702f08171e753021db4e4c2", "content_id": "453164f4dbcb61de4bb97507700759035552de10", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 7059, "license_type": "permissive", "max_line_length": 153, "num_lines": 240, "path": "/networkit/cpp/io/EdgeListReader.cpp", "repo_name": "networkitproject/networkit-mirror", "src_encoding": "UTF-8", "text": "/*\n * EdgeListReader.cpp\n *\n * Created on: 18.06.2013\n * Author: cls\n */\n\n#include \"EdgeListReader.h\"\n#include \"../auxiliary/Log.h\"\n\n#include <sstream>\n\n#include \"../auxiliary/Enforce.h\"\n\nnamespace NetworKit {\n\nEdgeListReader::EdgeListReader(const char separator, const node firstNode, const std::string commentPrefix, const bool continuous, const bool directed) :\n\tseparator(separator), commentPrefix(commentPrefix), firstNode(firstNode), continuous(continuous), mapNodeIds(), directed(directed) {\n//\tthis->mapNodeIds;i\n}\n\nGraph EdgeListReader::read(const std::string& path) {\n\tif (this->continuous) {\n\t\tDEBUG(\"read graph with continuous ids\");\n\t\treturn readContinuous(path);\n\t} else {\n\t\tDEBUG(\"read graph with NON continuous ids\");\n\t\treturn readNonContinuous(path);\n\t}\n}\n\nstd::map<std::string,node> EdgeListReader::getNodeMap() {\n\tif (this->continuous) throw std::runtime_error(\"Input files are assumed to have continuous node ids, therefore no node mapping has been created.\");\n\treturn this->mapNodeIds;\n}\n\nGraph EdgeListReader::readContinuous(const std::string& path) {\n\tstd::ifstream file(path);\n\tAux::enforceOpened(file);\n\tstd::string line; // the current line\n\n\t// read file once to get to the last line and figure out the number of nodes\n\t// unfortunately there is an empty line at the ending of the file, so we need to get the line before that\n\n\tnode maxNode = 0;\n\tbool weighted = false;\n\tbool checkedWeighted = false;\n\n\tDEBUG(\"separator: \" , this->separator);\n\tDEBUG(\"first node: \" , this->firstNode);\n\n\t// first find out the maximum node id\n\tDEBUG(\"first pass\");\n\tcount i = 0;\n\twhile (file.good()) {\n\t\t++i;\n\t\tstd::getline(file, line);\n\t\tTRACE(\"read line: \" , line);\n\t\tif (!line.empty()) {\n\t\t\tif (line.back() == '\\r') line.pop_back();\n\t\t\tif (line.compare(0, 
this->commentPrefix.length(), this->commentPrefix) == 0) {\n\t\t\t\tTRACE(\"ignoring comment: \" , line);\n\t\t\t} else {\n\t\t\t\tstd::vector<std::string> split = Aux::StringTools::split(line, this->separator);\n\t\t\t\tif (!checkedWeighted) {\n\t\t\t\t\tif (split.size() == 2) {\n\t\t\t\t\t\tweighted = false;\n\t\t\t\t\t} else if (split.size() == 3) {\n\t\t\t\t\t\tINFO(\"Identified graph as weighted.\");\n\t\t\t\t\t\tweighted = true;\n\t\t\t\t\t}\n\t\t\t\t\tcheckedWeighted = true;\n\t\t\t\t}\n\t\t\t\tif (split.size() == 2 || split.size() == 3) {\n\t\t\t\t\tTRACE(\"split into : \" , split[0] , \" and \" , split[1]);\n\t\t\t\t\tnode u = std::stoul(split[0]);\n\t\t\t\t\tif (u > maxNode) {\n\t\t\t\t\t\tmaxNode = u;\n\t\t\t\t\t}\n\t\t\t\t\tnode v = std::stoul(split[1]);\n\t\t\t\t\tif (v > maxNode) {\n\t\t\t\t\t\tmaxNode = v;\n\t\t\t\t\t}\n\t\t\t\t} else {\n\t\t\t\t\tstd::stringstream message;\n\t\t\t\t\tmessage << \"malformed line \";\n\t\t\t\t\tmessage << i << \": \";\n\t\t\t\t\tmessage << line;\n\t\t\t\t\tthrow std::runtime_error(message.str());\n\t\t\t\t}\n\t\t\t}\n\t\t} else {\n\t\t\tDEBUG(\"line \", i, \" is empty.\");\n\t\t}\n\t}\n\tfile.close();\n\tmaxNode = maxNode - this->firstNode + 1;\n\tDEBUG(\"max. node id found: \" , maxNode);\n\n\tGraph G(maxNode, weighted, directed);\n\n\tDEBUG(\"second pass\");\n\tfile.open(path);\n\t// split the line into start and end node. since the edges are sorted, the start node has the highest id of all nodes\n\ti = 0; // count lines\n\twhile(std::getline(file,line)){\n\t\tif(*line.rbegin() == '\\r') line.pop_back();\n\t\t++i;\n\t\tif (line.compare(0, this->commentPrefix.length(), this->commentPrefix) == 0) {\n\t\t\t// TRACE(\"ignoring comment: \" , line);\n\t\t} else {\n\t\t\t// TRACE(\"edge line: \" , line);\n\t\t\tstd::vector<std::string> split = Aux::StringTools::split(line, this->separator);\n\t\t\tif (split.size() == 2) {\n\t\t\t\tnode u = std::stoul(split[0]) - this->firstNode;\n\t\t\t\tnode v = std::stoul(split[1]) - this->firstNode;\n\t\t\t if (!G.hasEdge(u,v)) {\n\t\t\t\t\tG.addEdge(u, v);\n\t\t\t\t}\n\t\t\t} else if (weighted && split.size() == 3) {\n\t\t\t\tnode u = std::stoul(split[0]) - this->firstNode;\n\t\t\t\tnode v = std::stoul(split[1]) - this->firstNode;\n\t\t\t\tdouble weight = std::stod(split[2]);\n\t\t\t if (!G.hasEdge(u,v)) {\n\t\t\t\t\tG.addEdge(u, v, weight);\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tstd::stringstream message;\n\t\t\t\tmessage << \"malformed line \";\n\t\t\t\tmessage << i << \": \";\n\t\t\t\tmessage << line;\n\t\t\t\tthrow std::runtime_error(message.str());\n\t\t\t}\n\t\t}\n\t}\n\tfile.close();\n\n\tG.shrinkToFit();\n\treturn G;\n}\n\n\nGraph EdgeListReader::readNonContinuous(const std::string& path) {\n\tstd::ifstream file(path);\n\tAux::enforceOpened(file);\n\tDEBUG(\"file is opened, proceed\");\n\tstd::string line; // the current line\n\tnode consecutiveID = 0;\n\n\tbool weighted = false;\n\tbool checkedWeighted = false;\n\n\t// first find out the maximum node id\n\tDEBUG(\"first pass: create node ID mapping\");\n\tcount i = 0;\n\twhile (file.good()) {\n\t\t++i;\n\t\tstd::getline(file, line);\n\t\tTRACE(\"read line: \" , line);\n\t\tif (!line.empty()) {\n\t\t\tif(line.back() == '\\r') line.pop_back();\n\t\t\tif (line.compare(0, this->commentPrefix.length(), this->commentPrefix) == 0) {\n\t\t\t\t TRACE(\"ignoring comment: \" , line);\n\t\t\t} else if (line.length() == 0) {\n\t\t\t\tTRACE(\"ignoring empty line\");\n\t\t\t} else {\n\t\t\t\tstd::vector<std::string> split = Aux::StringTools::split(line, 
this->separator);\n\t\t\t\tif (!checkedWeighted) {\n\t\t\t\t\tif (split.size() == 2) {\n\t\t\t\t\t\tweighted = false;\n\t\t\t\t\t} else if (split.size() == 3) {\n\t\t\t\t\t\tINFO(\"Identified graph as weighted.\");\n\t\t\t\t\t\tweighted = true;\n\t\t\t\t\t}\n\t\t\t\t\tcheckedWeighted = true;\n\t\t\t\t}\n\t\t\t\tif (split.size() == 2 || split.size() == 3) {\n\t\t\t\t\tTRACE(\"split into : \" , split[0] , \" and \" , split[1]);\n\t\t\t\t\tif(this->mapNodeIds.insert(std::make_pair(split[0],consecutiveID)).second) ++consecutiveID;\n\t\t\t\t\tif(this->mapNodeIds.insert(std::make_pair(split[1],consecutiveID)).second) ++consecutiveID;\n\t\t\t\t} else {\n\t\t\t\t\tstd::stringstream message;\n\t\t\t\t\tmessage << \"malformed line \";\n\t\t\t\t\tmessage << i << \": \";\n\t\t\t\t\tmessage << line;\n\t\t\t\t\tthrow std::runtime_error(message.str());\n\t\t\t\t}\n\t\t\t}\n\t\t} else {\n\t\t\tDEBUG(\"line \", i, \" is empty.\");\n\t\t}\n\t}\n\tfile.close();\n\n\tDEBUG(\"found \",this->mapNodeIds.size(),\" unique node ids\");\n\tGraph G(this->mapNodeIds.size(), weighted, directed);\n\n\tDEBUG(\"second pass: add edges\");\n\tfile.open(path);\n\n\t// split the line into start and end node. since the edges are sorted, the start node has the highest id of all nodes\n\ti = 0; // count lines\n\twhile(std::getline(file,line)){\n\t\tif(*line.rbegin() == '\\r') line.pop_back();\n ++i;\n\t\tif (line.compare(0, this->commentPrefix.length(), this->commentPrefix) == 0) {\n\t\t\t// TRACE(\"ignoring comment: \" , line);\n\t\t} else {\n\t\t\t// TRACE(\"edge line: \" , line);\n\t\t\tstd::vector<std::string> split = Aux::StringTools::split(line, this->separator);\n\t\t\tif (split.size() == 2) {\n\t\t\t\tnode u = this->mapNodeIds[split[0]];\n\t\t\t\tnode v = this->mapNodeIds[split[1]];\n\t\t\t\tif (!G.hasEdge(u,v)) {\n\t\t\t\t\tG.addEdge(u, v);\n\t\t\t\t}\n\t\t\t} else if (weighted && split.size() == 3) {\n\t\t\t\tnode u = this->mapNodeIds[split[0]];\n\t\t\t\tnode v = this->mapNodeIds[split[1]];\n\t\t\t\tdouble weight = std::stod(split[2]);\n\t\t\t if (!G.hasEdge(u,v)) {\n\t\t\t\t\tG.addEdge(u, v, weight);\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tstd::stringstream message;\n\t\t\t\tmessage << \"malformed line \";\n\t\t\t\tmessage << i << \": \";\n\t\t\t\tmessage << line;\n\t\t\t\tthrow std::runtime_error(message.str());\n\t\t\t}\n\t\t}\n\t}\n\tDEBUG(\"read \",i,\" lines and added \",G.numberOfEdges(),\" edges\");\n\tfile.close();\n\n\tG.shrinkToFit();\n\treturn G;\n}\n\n} /* namespace NetworKit */\n" }, { "alpha_fraction": 0.7089201807975769, "alphanum_fraction": 0.7139906287193298, "avg_line_length": 25.361385345458984, "blob_id": "ab9e7bb79ca0486dde427cd9afeab83d6056c58f", "content_id": "f3e492a1c654b2419ff02de8ac7b9bbcc123f119", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5325, "license_type": "permissive", "max_line_length": 350, "num_lines": 202, "path": "/networkit/centrality.py", "repo_name": "networkitproject/networkit-mirror", "src_encoding": "UTF-8", "text": "\"\"\" This module contains algorithms for the calculation of centrality, i.e. 
ranking nodes by their structural importance\nto the network \"\"\"\n\n\n__author__ = \"Christian Staudt\"\n__credits__ = [\"Christian Staudt\", \"Elisabetta Bergamini\", \"Henning Meyerhenke\", \"Marc Nemes\", \"Maximilian Vogel\"]\n\n# extension imports\n# TODO: (+) ApproxCloseness\nfrom _NetworKit import Betweenness, PageRank, EigenvectorCentrality, DegreeCentrality, ApproxBetweenness, ApproxBetweenness2, DynApproxBetweenness, Closeness, KPathCentrality, CoreDecomposition, KatzCentrality, LocalClusteringCoefficient, ApproxCloseness, LocalPartitionCoverage, Sfigality, SpanningEdgeCentrality, PermanenceCentrality, TopCloseness\n\n\n# local imports\nfrom networkit.algebraic import adjacencyEigenvector, PageRankMatrix, symmetricEigenvectors\n\n# external imports\nimport math\n\ndef ranking(G, algorithm=Betweenness, normalized=False):\n\t\"\"\" Return a ranking of nodes by the specified centrality type\"\"\"\n\t# FIXME: some centrality algorithms take more parameters\n\tcentrality = algorithm(G, normalized)\n\tcentrality.run()\n\treturn centrality.ranking()\n\ndef scores(G, algorithm=Betweenness, normalized=False):\n\t\"\"\" Return the centrality scores of nodes using the specified centrality type\"\"\"\n\tcentrality = algorithm(G, normalized)\n\tcentrality.run()\n\treturn centrality.scores()\n\n\n\ndef rankPerNode(ranking):\n\t\"\"\"\n\tParameters\n\t----------\n \tranking: ordered list of tuples (node, score)\n\n\tReturns\n\t_______\n\tfor each node (sorted by node ID), the ranking of the node\n\n\t\"\"\"\n\tn_nodes = len(ranking)\n\tranking_id = [0]*n_nodes\n\tfor index, pair in enumerate(ranking):\n\t\tranking_id[pair[0]] = index\n\t#we assign to all nodes the ranking of the first node with the same score\n\tfor index, pair in enumerate(ranking):\n\t\t\tif index == 0:\n\t\t\t\tcontinue\n\t\t\tif pair[1] == ranking[index-1][1]:\n\t\t\t\tprev_node = ranking[index-1][0]\n\t\t\t\tranking_id[pair[0]] = ranking_id[prev_node]\n\treturn ranking_id\n\n\ndef relativeRankErrors(rx, ry):\n\t\"\"\"\n\tLet $r_x(u)$ be the rank of node $u$ in ranking $x$.\n\tThe relative rank error of node $u$ is defined as\n\t\t$$r_x(u) / r_y(u)$$\n\n\n\tParameters\n\t----------\n\trx : list\n\t\tranking - ordered list of tuples (node, score)\n\n\try: list\n\t\tranking - ordered list of tuples (node, score)\n\n\tReturns\n\t_______\n\tlist of rank errors ordered by node ID\n\n\t\"\"\"\n\tdiff = []\n\tn = len(rx)\n\tif not(n == len(ry)):\n\t\treturn diff\n\trnode_x = rankPerNode(rx)\n\trnode_y = rankPerNode(ry)\n\tfor i in range(n):\n\t\tdiff.append((rnode_x[i]+1)/(rnode_y[i]+1))\n\treturn diff\n\n\nclass SpectralCentrality:\n\t\"\"\"\n\tAbstract class to compute the spectral centrality of a graph. 
This class needs to be supplied with methods\n\tto generate the correct matrices and do the correct normalization.\n\t\"\"\"\n\tdef __init__(self, G, normalized=False):\n\t\t\"\"\"\n\t\tConstructor.\n\n\t\tParameters\n\t\t----------\n\t\tG : graph\n\t\t\tThe graph of which to compute the centrality\n\t\tnormalized : boolean\n\t\t\t\t\t Whether to normalize the results or not\n\n\t\t\"\"\"\n\t\tsuper(SpectralCentrality, self).__init__()\n\n\t\tself.graph = G\n\t\tself.normalized = normalized\n\n\t\tself.scoreList = None\n\t\tself.rankList = None\n\t\tself.evz = {}\n\n\tdef prepareSpectrum(self):\n\t\t\"\"\" Method that must be implemented to set the following values:\n\t\tself.eigenvectors = list of eigenvectors desired for centrality measure\n\t\tself.eigenvalues = list of corresponding eigenvalues\n\t\t\"\"\"\n\t\traise NotImplementedError\n\n\tdef normFactor(self):\n\t\t\"\"\" Method that must be implemented to return a correct normalization factor\"\"\"\n\t\traise NotImplementedError\n\n\tdef run(self):\n\t\tself.prepareSpectrum()\n\n\t\tself.scoreList = None\n\t\tself.rankList = None\n\t\tself.evz = {}\n\n\t\tif self.normalized:\n\t\t\tnormFactor = self.normFactor()\n\t\telse:\n\t\t\tnormFactor = 1\n\n\t\tfor v in self.graph.nodes():\n\t\t\tself.evz[v] = self.eigenvector[v] * normFactor\n\t\treturn self\n\n\tdef scores(self):\n\t\tif self.scoreList is None:\n\t\t\tself.scoreList = [v for k,v in self.evz.items()]\n\n\t\treturn self.scoreList\n\n\tdef ranking(self):\n\t\tif self.rankList is None:\n\t\t\tself.rankList = sorted(self.evz.items(),key=lambda x: float(x[1]), reverse=True)\n\t\treturn self.rankList\n\n\nclass SciPyEVZ(SpectralCentrality):\n\t\"\"\"\n\tCompute Eigenvector centrality using algebraic methods\n\n\tParameters\n\t----------\n\tG : graph\n\t\tThe graph of which to compute the centrality\n\tnormalized : boolean\n\t\t\t\t Whether to normalize the results or not\n\n\t\"\"\"\n\tdef __init__(self, G, normalized=False):\n\t\tif G.isDirected():\n\t\t\traise NotImplementedError(\"Not implemented for directed graphs; use centrality.EigenvectorCentrality instead\")\n\t\tsuper(SciPyEVZ, self).__init__(G, normalized=normalized)\n\n\tdef _length(self, vector):\n\t\tsquare = sum([val * val for val in vector])\n\t\treturn math.sqrt(square)\n\n\tdef normFactor(self):\n\t\treturn 1 / self._length(self.eigenvector)\n\n\tdef prepareSpectrum(self):\n\t\tspectrum = adjacencyEigenvector(self.graph, order=0)\n\t\tself.eigenvector = spectrum[1]\n\t\tself.eigenvalue = spectrum[0]\n\nclass SciPyPageRank(SpectralCentrality):\n\t# TODO: docstring\n\tdef __init__(self, G, damp=0.95, normalized=False):\n\t\tsuper(SciPyPageRank, self).__init__(G, normalized=normalized)\n\n\t\tself.damp = damp\n\n\tdef _length(self, vector):\n\t\treturn sum(vector)\n\n\tdef normFactor(self):\n\t\treturn 1 / self._length(self.eigenvector)\n\n\tdef prepareSpectrum(self):\n\t\tprMatrix = PageRankMatrix(self.graph, self.damp)\n\t\tspectrum = symmetricEigenvectors(prMatrix, cutoff=0, reverse=False)\n\n\t\tself.eigenvector = spectrum[1][0]\n\t\tself.eigenvalue = spectrum[0][0]\n" }, { "alpha_fraction": 0.6821516156196594, "alphanum_fraction": 0.6955990195274353, "avg_line_length": 19.9743595123291, "blob_id": "cf85f07b63b3aa7e7ebbf153d5581f4e5f72631a", "content_id": "dee8a16d40368a8bc990b9c80f7529b0bc54316f", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 818, "license_type": "permissive", "max_line_length": 102, "num_lines": 39, "path": 
"/networkit/cpp/scd/SelectiveCommunityDetector.h", "repo_name": "networkitproject/networkit-mirror", "src_encoding": "UTF-8", "text": "/*\n * SelectiveCommunityDetector.h\n *\n * Created on: 15.05.2013\n * Author: cls\n */\n\n#ifndef SELECTIVECOMMUNITYDETECTOR_H_\n#define SELECTIVECOMMUNITYDETECTOR_H_\n\n#include <unordered_set>\n\n#include \"../auxiliary/Timer.h\"\n#include \"../graph/Graph.h\"\n\nnamespace NetworKit {\n\nclass SelectiveCommunityDetector {\n\npublic:\n\n\tSelectiveCommunityDetector(const Graph& G);\n\n /**\n * Detect communities for given seed nodes.\n * @return a mapping from seed node to community (as a set of nodes)\n */\n\tvirtual std::map<node, std::set<node> > run(std::set<unsigned int>& seeds) = 0;\n\n\t// FIXME: resolve Cython issue that does not allow a uint64_t as content type of a container as input\n\n\nprotected:\n\n\tconst Graph& G;\t//!< the input graph\n};\n\n} /* namespace NetworKit */\n#endif /* SELECTIVECOMMUNITYDETECTOR_H_ */\n" }, { "alpha_fraction": 0.6601941585540771, "alphanum_fraction": 0.6860841512680054, "avg_line_length": 14.399999618530273, "blob_id": "a9dd083263ee0f31e373c40961bb0b222054459d", "content_id": "dac92e86813f05c88c85a432c052f03354727a33", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 309, "license_type": "permissive", "max_line_length": 47, "num_lines": 20, "path": "/networkit/cpp/spanning/test/SpanningTreeGTest.h", "repo_name": "networkitproject/networkit-mirror", "src_encoding": "UTF-8", "text": "/*\n * SpanningTreeGTest.h\n *\n * Created on: 20.06.2015\n * Author: Henning\n */\n\n#ifndef SPANNINGTREEGTEST_H_\n#define SPANNINGTREEGTEST_H_\n\n#include <gtest/gtest.h>\n\nnamespace NetworKit {\n\nclass SpanningTreeGTest: public testing::Test {\n\n};\n\n} /* namespace NetworKit */\n#endif /* SPANNINGTREEGTEST_H_ */\n\n" }, { "alpha_fraction": 0.647814929485321, "alphanum_fraction": 0.6503856182098389, "avg_line_length": 12, "blob_id": "f5cb92ea68e4d3bf9a6d8edc54fea6d08af0e583", "content_id": "d91620649d6dbbd4c18cf638830d1dc8f05bd195", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 389, "license_type": "permissive", "max_line_length": 25, "num_lines": 30, "path": "/Doc/doc/api/modules.rst", "repo_name": "networkitproject/networkit-mirror", "src_encoding": "UTF-8", "text": "Python Documentation\n====================\n\n.. automodule:: networkit\n\n.. 
toctree::\n :maxdepth: 4\n\n algebraic\n centrality\n coloring\n components\n correlation\n community\n distance\n dynamic\n engineering\n generators\n gephi\n globals\n graph\n graphio\n linkprediction\n nxadapter \n profiling\n sparsification\n stopwatch\n structures\n termgraph\n viztasks" }, { "alpha_fraction": 0.6571428775787354, "alphanum_fraction": 0.6755102276802063, "avg_line_length": 16.5, "blob_id": "a7e25d9bf6625df6bc3be09aab8bf1d1ba5261f1", "content_id": "68161bba082b4c4d08d29858e8869fc07f89d2fb", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 490, "license_type": "permissive", "max_line_length": 44, "num_lines": 28, "path": "/networkit/cpp/geometric/test/GeometricGTest.h", "repo_name": "networkitproject/networkit-mirror", "src_encoding": "UTF-8", "text": "/*\n * GeometricGTest.h\n *\n * Created on: 29.07.2014\n * Author: moritzl\n */\n\n#ifndef GEOMETRICGTEST_H_\n#define GEOMETRICGTEST_H_\n\n#include <gtest/gtest.h>\n#include <cmath>\n\n#include \"../../auxiliary/Log.h\"\n#include \"../../auxiliary/Random.h\"\n#include \"../HyperbolicSpace.h\"\n#include \"../Point2D.h\"\n\nnamespace NetworKit {\n\nclass GeometricGTest: public testing::Test {\npublic:\n\tGeometricGTest();\n\tvirtual ~GeometricGTest();\n};\n\n} /* namespace NetworKit */\n#endif /* GEOMETRICGTEST_H_ */\n" }, { "alpha_fraction": 0.6363636255264282, "alphanum_fraction": 0.6643356680870056, "avg_line_length": 13.300000190734863, "blob_id": "516b1f729f4822c8cae50ce7b680b13cce55bc66", "content_id": "f062c37c7ceae03631c7ba5e88fbaddccd64a3d4", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 286, "license_type": "permissive", "max_line_length": 42, "num_lines": 20, "path": "/networkit/cpp/graph/test/DynSSSPGTest.h", "repo_name": "networkitproject/networkit-mirror", "src_encoding": "UTF-8", "text": "/*\n * DynSSSPGTest.h\n *\n * Created on: 21.07.2014\n * Author: ebergamini\n */\n\n#ifndef DYNSSSPGTEST_H_\n#define DYNSSSPGTEST_H_\n\n#include <gtest/gtest.h>\n\nnamespace NetworKit {\n\nclass DynSSSPGTest: public testing::Test {\n};\n\n} /* namespace NetworKit */\n\n#endif /* DYNSSSPGTEST_H_ */\n" }, { "alpha_fraction": 0.5403726696968079, "alphanum_fraction": 0.590062141418457, "avg_line_length": 12.416666984558105, "blob_id": "06b5b0cdd644af902855c7a530be8f2858d2a14c", "content_id": "b350bd2572a99e94fbf89d69d0b492c93613d742", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 161, "license_type": "permissive", "max_line_length": 32, "num_lines": 12, "path": "/networkit/cpp/numerics/Smoother.cpp", "repo_name": "networkitproject/networkit-mirror", "src_encoding": "UTF-8", "text": "/*\n * Smoother.cpp\n *\n * Created on: 31.10.2014\n * Author: Michael Wegner ([email protected])\n */\n\n#include \"Smoother.h\"\n\nnamespace NetworKit {\n\n} /* namespace NetworKit */\n" }, { "alpha_fraction": 0.7181467413902283, "alphanum_fraction": 0.7220077514648438, "avg_line_length": 14.235294342041016, "blob_id": "97148e5bfc92e02dbdc5b05e2ce84212a1952816", "content_id": "c33f0fb13ba74d6055bf79b750ee1d5e8608b560", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 259, "license_type": "permissive", "max_line_length": 39, "num_lines": 17, "path": "/networkit/cpp/scd/test/SelectiveCDGTest.h", "repo_name": "networkitproject/networkit-mirror", "src_encoding": "UTF-8", "text": "#ifndef NOGTEST\n\n#ifndef 
CLUSTERINGALGOGTEST_H_\n#define CLUSTERINGALGOGTEST_H_\n\n#include <gtest/gtest.h>\n\nnamespace NetworKit {\n\nclass SCDGTest2: public testing::Test {\n\n};\n\n} /* namespace NetworKit */\n#endif /* CLUSTERINGALGOGTEST_H_ */\n\n#endif /*NOGTEST */\n" }, { "alpha_fraction": 0.6890971064567566, "alphanum_fraction": 0.6967632174491882, "avg_line_length": 21.576923370361328, "blob_id": "1614853f973c3dc04cd5b335f9d2e4565ad43df8", "content_id": "4703182a3646977424761242af9496d854d8f406", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1174, "license_type": "permissive", "max_line_length": 86, "num_lines": 52, "path": "/networkit/cpp/io/EdgeListWriter.h", "repo_name": "networkitproject/networkit-mirror", "src_encoding": "UTF-8", "text": "/*\n * EdgeListWriter.h\n *\n * Created on: 18.06.2013\n * Author: cls\n */\n\n#ifndef EDGELISTWRITER_H_\n#define EDGELISTWRITER_H_\n\n#include <fstream>\n#include <iostream>\n#include <string>\n\n#include \"GraphReader.h\"\n\nnamespace NetworKit {\n\n/**\n * A writer for the edge list format used by the LFR benchmark generators, defined as:\n * \t\tlist of edges (nodes are labelled from 1 to the number of nodes;\n * \t\tthe edges are ordered and repeated twice, i.e. source-target and target-source).\n *\n * \tThe starting index is a parameter to enable other edge list formats.\n */\nclass EdgeListWriter {\n\npublic:\n\n\tEdgeListWriter() = default; //nullary constructor for Python shell\n\n\t/**\n\t * @param[in]\tseparator\tcharacter used to separate nodes in an edge line\n\t * @param[in]\tfirstNode\tindex of the first node in the file\n\t */\n\tEdgeListWriter(char separator, node firstNode);\n\n\t/**\n\t * Write the graph to a file.\n\t * @param[in]\tG\t\tthe graph\n\t * @param[in]\tpath\tthe output file path\n\t */\n\tvoid write(const Graph& G, std::string path);\n\nprotected:\n\n\tchar separator; \t//!< character separating nodes in an edge line\n\tnode firstNode;\n};\n\n} /* namespace NetworKit */\n#endif /* EDGELISTWRITER_H_ */\n" }, { "alpha_fraction": 0.6391489505767822, "alphanum_fraction": 0.6774467825889587, "avg_line_length": 28.31325340270996, "blob_id": "08fe171b29f9a4e7d077ef3fb3a4e031c82b9832", "content_id": "2a98d748269e977099728f45f185a74bd48f3a4a", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 2350, "license_type": "permissive", "max_line_length": 82, "num_lines": 83, "path": "/networkit/cpp/edgescores/test/ChibaNishizekiQuadrangleEdgeScoreGTest.cpp", "repo_name": "networkitproject/networkit-mirror", "src_encoding": "UTF-8", "text": "/*\n * ChibaNishizekiQuadrangleEdgeScoreGTest.cpp\n *\n * Created on: 23.05.2014\n * Author: Gerd Lindner\n */\n\n#ifndef NOGTEST\n\n#include \"ChibaNishizekiQuadrangleEdgeScoreGTest.h\"\n\n#include \"../ChibaNishizekiQuadrangleEdgeScore.h\"\n\nnamespace NetworKit {\n\nTEST_F(ChibaNishizekiQuadrangleEdgeScoreGTest, testQuadrangleCountsTrivial) {\n\tGraph g(5);\n\n\tg.addEdge(0,1);\n\tg.addEdge(0,2);\n\tg.addEdge(1,3);\n\tg.addEdge(2,3);\n\n\tg.indexEdges();\n\n\tChibaNishizekiQuadrangleEdgeScore counter(g);\n\tcounter.run();\n\tstd::vector<count> counts = counter.scores();\n\n\tEXPECT_EQ(1, (counts[g.edgeId(0,1)])) << \"wrong quadrangle count\";\n\tEXPECT_EQ(1, (counts[g.edgeId(0,2)])) << \"wrong quadrangle count\";\n\tEXPECT_EQ(1, (counts[g.edgeId(1,3)])) << \"wrong quadrangle count\";\n\tEXPECT_EQ(1, (counts[g.edgeId(2,3)])) << \"wrong quadrangle count\";\n\t//EXPECT_EQ(0, (counts[g.edgeId(2,3)])) << 
\"wrong quadrangle count\";\n\t//TODO: edge ids for non-existing edges currently result in unexpected behaviour.\n}\n\nTEST_F(ChibaNishizekiQuadrangleEdgeScoreGTest, testQuadrangleCountsSimple) {\n\tcount n = 7;\n\tGraph g(n);\n\n\tg.addEdge(0,1);\n\tg.addEdge(0,3);\n\tg.addEdge(0,4);\n\tg.addEdge(0,6);\n\tg.addEdge(1,2);\n\tg.addEdge(1,3);\n\tg.addEdge(2,3);\n\tg.addEdge(3,5);\n\tg.addEdge(3,6);\n\tg.addEdge(4,5);\n\n\tg.indexEdges();\n\n\tEXPECT_EQ(10, g.numberOfEdges()) << \"wrong edge count\";\n\n\tChibaNishizekiQuadrangleEdgeScore counter(g);\n\tcounter.run();\n\tstd::vector<count> counts = counter.scores();\n\n\tEXPECT_EQ(7, g.numberOfNodes()) << \"undesired side effect\";\n\tEXPECT_EQ(10, g.numberOfEdges()) << \"undesired side effect\";\n\n\tEXPECT_EQ(10, counts.size()) << \"wrong quadrangle count map size\";\n\tEXPECT_EQ(2, (counts[g.edgeId(0,1)])) << \"wrong quadrangle count\";\n\tEXPECT_EQ(2, (counts[g.edgeId(0,3)])) << \"wrong quadrangle count\";\n\tEXPECT_EQ(1, (counts[g.edgeId(0,4)])) << \"wrong quadrangle count\";\n\tEXPECT_EQ(1, (counts[g.edgeId(0,6)])) << \"wrong quadrangle count\";\n\t\n\tEXPECT_EQ(1, (counts[g.edgeId(1,2)])) << \"wrong quadrangle count\";\n\tEXPECT_EQ(1, (counts[g.edgeId(1,3)])) << \"wrong quadrangle count\";\n\tEXPECT_EQ(1, (counts[g.edgeId(2,3)])) << \"wrong quadrangle count\";\n\n\tEXPECT_EQ(1, (counts[g.edgeId(3,5)])) << \"wrong quadrangle count\";\n\tEXPECT_EQ(1, (counts[g.edgeId(3,6)])) << \"wrong quadrangle count\";\n\tEXPECT_EQ(1, (counts[g.edgeId(4,5)])) << \"wrong quadrangle count\";\n}\n\n\n}\n/* namespace NetworKit */\n\n#endif /*NOGTEST */\n" }, { "alpha_fraction": 0.7375951409339905, "alphanum_fraction": 0.7391172051429749, "avg_line_length": 51.98387145996094, "blob_id": "85b2cabc695b6cb6a4d18d5ff0891b5a455eca56", "content_id": "8b00e4490f453e049e401e8a9d7307a2ef56c905", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 3309, "license_type": "permissive", "max_line_length": 175, "num_lines": 62, "path": "/Doc/doc/networkit-vm_guide.rst", "repo_name": "networkitproject/networkit-mirror", "src_encoding": "UTF-8", "text": "===============================\nNetworKit VM Installation Guide\n===============================\n\nThis step-by-step guide will help you through the installation of NetworKit in a virtual machine.\n\n.. note:: This type of installation is only recommended for users running Microsoft Windows or for testing NetworKit since the\n performance might be worse due to virtualization.\n\n\nStep 1 - Installing Oracle VM VirtualBox\n----------------------------------------\n\nIf you do not already have VirtualBox installed on your system, visit https://www.virtualbox.org/wiki/Downloads and download the right VirtualBox\nbinary for your system. Once downloaded, run the setup and follow the instructions to install VirtualBox on your system.\n\n\nStep 2 - Import NetworKit VM into VirtualBox\n--------------------------------------------\n\nFirst, download the `NetworKit VM <https://networkit.iti.kit.edu/uploads/networkit-vm.zip>`_. After that, open VirtualBox and click on „File -> Import Appliance ...“\nor the corresponding entry in your system language.\n\n.. 
image:: resources/networkit_vm_import.png\n :align: center\n\n|\n\nIn the opening dialog, click on the small folder icon and specify the path to the NetworKit VM file you downloaded.\nHitting „Next“ will show you some options for setting up the virtual machine (number of cores, amount of RAM, etc.).\nThe standard settings should be fine for the moment and you can finish this step by clicking the „Import“ button.\n\n\nStep 3 - Running the NetworKit VM\n---------------------------------\n\nAfter the previous steps, you will find an entry „networkit“ in the left list of available virtual machines in VirtualBox.\nTo start the virtual machine, double click on „networkit“ or select the entry and click on the „Start“ button in the top\nmenu bar.\n\n.. image:: resources/networkit_vm_start.png\n\t:align: center\n\n\n|\n\nThe current version of NetworKit is preinstalled and can be found in the folder linked on the desktop. Now you can continue\nto read our `Get Started <get_started.html>`_ guide to see how to build the C++ part of NetworKit if you want to develop new\nalgorithms or to see how you can use NetworKit with IPython. Make sure to check out the\n`NetworKit UserGuide <http://nbviewer.ipython.org/urls/networkit.iti.kit.edu/uploads/docs/NetworKit_UserGuide.ipynb>`_ to see usage\nexamples.\n\n\nShared folders\n--------------\n\nIn order to access files of your host system in the NetworKit VM and vice versa, you can point VirtualBox to a shared folder on your host system.\nTo add a shared folder, open the settings of the NetworKit VM by selecting \"networkit\" from the list of available virtual machines and click \"Settings\".\nChange to the \"Shared Folders\" section and press the small folder symbol to the right. A dialog will popup that asks for the path to the folder you\nwant to share with the NetworKit VM. The folder name can be left as is or changed to some other name that will be displayed in the virtual machine. Make\nsure to click \"Auto-mount\" such that the folder is automatically mounted when you start the virtual machine. The folder should now be accessible from the\nfile manager in the NetworKit VM. 
For further information on shared folders, please visit the `VirtualBox manual <https://www.virtualbox.org/manual/ch04.html#sharedfolders>`_.\n" }, { "alpha_fraction": 0.7477441430091858, "alphanum_fraction": 0.7531582117080688, "avg_line_length": 22.195348739624023, "blob_id": "17bc812231028b51a6285a305c8e72b27ff03360", "content_id": "76fd653ef37a3b63626d489c69bcf2ccacb422f4", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4987, "license_type": "permissive", "max_line_length": 161, "num_lines": 215, "path": "/benchmark/nk.py", "repo_name": "networkitproject/networkit-mirror", "src_encoding": "UTF-8", "text": "import networkit\n\nfrom util import *\nimport base\n\nframework = \"networkit\"\n\n# - connected components (properties.ConnectedComponents, properties.ParallelConnectedComponents)\n\nclass Algo(base.Algo):\n\t\"\"\" runner for an algorithm\"\"\"\n\n\tframework = framework\n\n\tdef loadGraph(self, path, graphFormat=networkit.Format.GML):\n\t\twith Timer() as t:\n\t\t\tG = networkit.readGraph(path, graphFormat)\n\t\tdebug(\"reading {path} took {t.elapsed} s\".format(**locals()))\n\t\treturn G\n\nclass bConnectedComponents(Algo):\n\tname = \"ConnectedComponents\"\n\n\tdef run(self, G):\n\t\tcc = networkit.components.ConnectedComponents(G)\n\t\tcc.run()\n\t\treturn cc.numberOfComponents()\n\n\n# - k-core decomposition (properties.CoreDecomposition)\n\nclass bCoreDecomposition(Algo):\n\tname = \"CoreDecomposition\"\n\n\tdef run(self, G):\n\t\tcd = networkit.centrality.CoreDecomposition(G)\n\t\tcd.run()\n\nclass bCoreDecompositionSeq(Algo):\n\tname = \"CoreDecompositionSeq\"\n\n\tdef run(self, G):\n\t\tcd = networkit.centrality.CoreDecomposition(G, enforceBucketQueueAlgorithm=True)\n\t\tcd.run()\n\n\n# - BFS & Dijkstra (graph.BFS, graph.Dijkstra)\nclass bBFS(Algo):\n\tname = \"BFS\"\n\n\tdef run(self, G):\n\t\tbfs = networkit.graph.BFS(G, G.randomNode(), storePaths=False)\n\t\tbfs.run()\n\n\n# - community detection (community.PLM, community.PLP)\n\nclass bCommunityDetectionLM(Algo):\n\tname = \"CommunityDetectionLM\"\n\n\tdef run(self, G):\n\t\tplm = networkit.community.PLM(G, turbo=True)\n\t\tplm.run()\n\nclass bCommunityDetectionLP(Algo):\n\tname = \"CommunityDetectionLP\"\n\n\tdef run(self, G):\n\t\tplm = networkit.community.PLP(G)\n\t\tplm.run()\n\n\n# distance module\n\nclass bDiameter(Algo):\n\tname = \"Diameter\"\n\n\tdef run(self, G):\n\t\treturn networkit.distance.Diameter(G, networkit.distance.DiameterAlgo.exact).run()\n\n\nclass bDiameterEstimate(Algo):\n\tname = \"DiameterEstimate\"\n\n\tdef run(self, G):\n\t\treturn networkit.distance.Diameter(G, networkit.distance.DiameterAlgo.estimatedRange, error=0.1).run()\n\n\nclass bEffectiveDiameter(Algo):\n\tname = \"EffectiveDiameter\"\n\n\tdef run(self, G):\n\t\treturn networkit.distance.EffectiveDiameter(G).run().getEffectiveDiameter()\n\n\nclass bApproxEffectiveDiameter(Algo):\n\tname = \"ApproxEffectiveDiameter\"\n\n\tdef run(self, G):\n\t\treturn networkit.distance.ApproxEffectiveDiameter(G).run().getEffectiveDiameter()\n\n\nclass bApproxHopPlot(Algo):\n\tname = \"ApproxHopPlot\"\n\n\tdef run(self, G):\n\t\treturn networkit.distance.ApproxHopPlot(G).run().getHopPlot()\n\n\nclass bNeighborhoodFunction(Algo):\n\tname = \"NeighborhoodFunction\"\n\n\tdef run(self, G):\n\t\treturn networkit.distance.NeighborhoodFunction(G).run().getNeighborhoodFunction()\n\n\nclass bApproxNeighborhoodFunction(Algo):\n\tname = \"ApproxNeighborhoodFunction\"\n\n\tdef run(self, 
G):\n\t\treturn networkit.distance.ApproxNeighborhoodFunction(G).run().getNeighborhoodFunction()\n\n\n\n# - clustering coefficients (average local), exact (properties.ClusteringCoefficient.avgLocal) and approximated (properties.ClusteringCoefficient.approxAvgLocal)\n\nclass bClusteringCoefficient(Algo):\n\tname = \"ClusteringCoefficient\"\n\n\tdef run(self, G):\n\t\tnetworkit.centrality.LocalClusteringCoefficient(G).run()\n\n\nclass bApproxClusteringCoefficient(Algo):\n\tname = \"ClusteringCoefficientApprox\"\n\n\tdef run(self, G):\n\t\t# TODO: specify number of trials\n\t\tc = networkit.properties.ClusteringCoefficient.approxAvgLocal(G, trials=1000)\n\t\treturn c\n\n\n\n# - centrality\n\n# \t- PageRank (centrality.PageRank, centrality.SciPyPageRank)\n\nclass bPageRank(Algo):\n\tname = \"PageRank\"\n\n\tdef run(self, G):\n\t\tpr = networkit.centrality.PageRank(G, damp=0.85, tol=1e-06)\n\t\tpr.run()\n\n# \t- Eigenvector centrality (centrality.EigenvectorCentrality, centrality.SciPyEVZ)\n\nclass bEigenvectorCentrality(Algo):\n\tname = \"EigenvectorCentrality\"\n\n\tdef run(self, G):\n\t\tevc = networkit.centrality.EigenvectorCentrality(G, tol=1e-06)\n\t\tevc.run()\n\nclass bKatzCentrality(Algo):\n\tname = \"KatzCentrality\"\n\n\tdef run(self, G):\n\t\tkc = networkit.centrality.KatzCentrality(G, tol=1e-06)\n\t\tkc.run()\n\n\nclass bDegreeAssortativity(Algo):\n\tname = \"DegreeAssortativity\"\n\n\tdef run(self, G):\n\t\tnetworkit.correlation.Assortativity(G, networkit.centrality.DegreeCentrality(G).run().scores()).run()\n\n\n# \t- betweenness, exact (centrality.Betweenness) and approximated (centrality.ApproxBetweenness, centrality.ApproxBetweenness2)\n\nclass bBetweenness(Algo):\n\tname = \"Betweenness\"\n\n\tdef run(self, G):\n\t\tbc = networkit.centrality.Betweenness(G)\n\t\tbc.run()\n\nclass bBetweennessSeq(Algo):\n\tname = \"BetweennessSeq\"\n\n\tdef run(self, G):\n\t\tmt = networkit.getMaxNumberOfThreads()\n\t\tnetworkit.setNumberOfThreads(1)\n\t\tbc = networkit.centrality.Betweenness(G)\n\t\tbc.run()\n\t\tnetworkit.setNumberOfThreads(mt)\n\n\nclass bApproxBetweenness(Algo):\n\tname = \"BetweennessApprox\"\n\n\tdef run(self, G):\n\t\tbc = networkit.centrality.ApproxBetweenness2(G, nSamples=42)\n\t\tbc.run()\n\n\nclass bApproxBetweennessSeq(Algo):\n\tname = \"BetweennessApproxSeq\"\n\n\tdef run(self, G):\n\t\tmt = networkit.getMaxNumberOfThreads()\n\t\tnetworkit.setNumberOfThreads(1)\n\t\tbc = networkit.centrality.ApproxBetweenness2(G, nSamples=42)\n\t\tbc.run()\n\t\tnetworkit.setNumberOfThreads(mt)\n" }, { "alpha_fraction": 0.6605091094970703, "alphanum_fraction": 0.6704212427139282, "avg_line_length": 22.98918914794922, "blob_id": "6f1ce063a22afd86fa8feaf95d762b22e65de52b", "content_id": "4165fa2e180c61cfddeb3dd39fe37f21760b5552", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 4439, "license_type": "permissive", "max_line_length": 146, "num_lines": 185, "path": "/networkit/cpp/algebraic/Vector.cpp", "repo_name": "networkitproject/networkit-mirror", "src_encoding": "UTF-8", "text": "/*\n * Vector.cpp\n *\n * Created on: 12.03.2014\n * Author: Michael Wegner ([email protected])\n */\n\n#include \"Vector.h\"\n\n#include \"Matrix.h\"\n\nnamespace NetworKit {\n\nVector::Vector() : values(0), transposed(false) {}\n\nVector::Vector(const count dimension, const double initialValue, const bool transpose) : values(dimension, initialValue), transposed(transpose) {}\n\nVector::Vector(const std::vector<double> &values, const bool 
transpose) : values(values), transposed(transpose) {\n}\n\nVector::Vector(const std::initializer_list<double> &list) : values(list), transposed(false) {\n}\n\nbool Vector::isTransposed() const {\n\treturn transposed;\n}\n\nVector Vector::transpose() const {\n\tVector v(*this);\n\tv.transposed = !this->transposed;\n\treturn v;\n}\n\ndouble Vector::length() const {\n\treturn std::sqrt(this->transpose() * (*this));\n}\n\ndouble Vector::mean() const {\n\tdouble sum = 0.0;\n\tthis->forElements([&](double value){\n\t\tsum += value;\n\t});\n\n\treturn sum / (double) this->getDimension();\n}\n\nbool Vector::operator==(const Vector &other) const {\n\tif (getDimension() != other.getDimension() || isTransposed() != other.isTransposed()) return false;\n\n\tfor (index i = 0; i < getDimension(); i++) {\n\t\tif (values[i] != other[i]) return false;\n\t}\n\n\treturn true;\n}\n\nbool Vector::operator!=(const Vector &other) const {\n\treturn !(*this == other);\n}\n\ndouble Vector::innerProduct(const Vector &v1, const Vector &v2) {\n\tassert(v1.getDimension() == v2.getDimension());\n\tdouble scalar = 0.0;\n\tfor (index i = 0; i < v1.getDimension(); ++i) {\n\t\tscalar += v1[i] * v2[i];\n\t}\n\n\treturn scalar;\n}\n\nMatrix Vector::outerProduct(const Vector &v1, const Vector &v2) {\n\tstd::vector<Vector> rows(v1.getDimension(), Vector(v2.getDimension(), 0.0));\n\n#pragma omp parallel for\n\tfor (index i = 0; i < v1.getDimension(); ++i) {\n\t\tfor (index j = 0; j < v2.getDimension(); ++j) {\n\t\t\trows[i][j] = v1[i] * v2[j];\n\t\t}\n\t}\n\n\treturn Matrix(rows);\n}\n\ndouble Vector::operator*(const Vector &other) const {\n\tassert(isTransposed() && !other.isTransposed()); // vectors must be transposed correctly for inner product\n\tassert(getDimension() == other.getDimension()); // dimensions of vectors must match\n\n\treturn innerProduct(*this, other);\n}\n\nVector Vector::operator*(const Matrix &matrix) const {\n\tassert(isTransposed()); // vector must be of the form 1xn\n\tassert(getDimension() == matrix.numberOfRows()); // dimensions of vector and matrix must match\n\n\tVector result(matrix.numberOfColumns(), 0.0, true);\n#pragma omp parallel for\n\tfor (count k = 0; k < matrix.numberOfColumns(); ++k) {\n\t\tVector column = matrix.column(k);\n\t\tresult[k] = (*this) * column;\n\t}\n\n\treturn result;\n}\n\nVector Vector::operator*(const double &scalar) const {\n\treturn Vector(*this) *= scalar;\n}\n\nVector& Vector::operator*=(const double &scalar) {\n#pragma omp parallel for\n\tfor (count i = 0; i < getDimension(); i++) {\n\t\tvalues[i] *= scalar;\n\t}\n\n\treturn *this;\n}\n\nVector Vector::operator/(const double &divisor) const {\n\treturn Vector(*this) /= divisor;\n}\n\nVector& Vector::operator/=(const double &divisor) {\n\treturn *this *= 1 / divisor;\n}\n\nVector Vector::operator+(const Vector &other) const {\n\treturn Vector(*this) += other;\n}\n\nVector Vector::operator+(const double value) const {\n\treturn Vector(*this) += value;\n}\n\nVector& Vector::operator+=(const Vector &other) {\n\tassert(isTransposed() == other.isTransposed()); // vectors must be transposed correctly\n\tassert(getDimension() == other.getDimension()); // dimensions of vectors must match\n\n#pragma omp parallel for\n\tfor (count i = 0; i < getDimension(); i++) {\n\t\tvalues[i] += other[i];\n\t}\n\n\treturn *this;\n}\n\nVector& Vector::operator+=(const double value) {\n#pragma omp parallel for\n\tfor (count i = 0; i < getDimension(); ++i) {\n\t\tvalues[i] += value;\n\t}\n\n\treturn *this;\n}\n\nVector Vector::operator-(const 
Vector &other) const {\n\treturn Vector(*this) -= other;\n}\n\nVector Vector::operator-(const double value) const {\n\treturn Vector(*this) += value;\n}\n\nVector& Vector::operator-=(const Vector &other) {\n\tassert(isTransposed() == other.isTransposed()); // vectors must be transposed correctly\n\tassert(getDimension() == other.getDimension()); // dimensions of vectors must match\n\n#pragma omp parallel for\n\tfor (count i = 0; i < getDimension(); i++) {\n\t\tvalues[i] -= other[i];\n\t}\n\n\treturn *this;\n}\n\nVector& Vector::operator-=(const double value) {\n#pragma omp parallel for\n\tfor (count i = 0; i < getDimension(); ++i) {\n\t\tvalues[i] -= value;\n\t}\n\n\treturn *this;\n}\n\n\n} /* namespace NetworKit */\n\n" }, { "alpha_fraction": 0.6563876867294312, "alphanum_fraction": 0.6740087866783142, "avg_line_length": 14.133333206176758, "blob_id": "5f0017e85288594f75ed3b27b787f136d3ff2847", "content_id": "af5df21632735e2c991a0a5d9dad974959faa66b", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 454, "license_type": "permissive", "max_line_length": 53, "num_lines": 30, "path": "/networkit/cpp/coarsening/test/CoarseningGTest.h", "repo_name": "networkitproject/networkit-mirror", "src_encoding": "UTF-8", "text": "/*\n * CoarseningGTest.h\n *\n * Created on: 20.12.2012\n * Author: Christian Staudt ([email protected])\n */\n\n#ifndef NOGTEST\n\n#ifndef COARSENINGGTEST_H_\n#define COARSENINGGTEST_H_\n\n#include <gtest/gtest.h>\n\nnamespace NetworKit {\n\n/**\n * googletest test fixture for the coarsening module.\n */\nclass CoarseningGTest: public testing::Test {\n\n\t// TODO: are constructor/destructor needed?\n\n};\n\n\n} /* namespace NetworKit */\n#endif /* COARSENINGGTEST_H_ */\n\n#endif /*NOGTEST */\n" }, { "alpha_fraction": 0.6225221157073975, "alphanum_fraction": 0.6615352034568787, "avg_line_length": 28.82390022277832, "blob_id": "99db8fa0dc93b82bace3d8c2c268df7a08ea79bc", "content_id": "dfff8a93b34778e3703cc83bc134b9a6592f31f6", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 4742, "license_type": "permissive", "max_line_length": 82, "num_lines": 159, "path": "/networkit/cpp/edgescores/test/ChibaNishizekiTriangleEdgeScoreGTest.cpp", "repo_name": "networkitproject/networkit-mirror", "src_encoding": "UTF-8", "text": "/*\n * ChibaNishizekiEdgeScoreGTest.cpp\n *\n * Created on: 23.05.2014\n * Author: Gerd Lindner\n */\n\n#ifndef NOGTEST\n\n#include \"ChibaNishizekiTriangleEdgeScoreGTest.h\"\n#include \"../ChibaNishizekiTriangleEdgeScore.h\"\n#include \"../TriangleEdgeScore.h\"\n\nnamespace NetworKit {\n\nTEST_F(ChibaNishizekiTriangleEdgeScoreGTest, testTriangleCountsTrivial) {\n\tGraph g(5);\n\n\tg.addEdge(0,1);\n\tg.addEdge(0,2);\n\tg.addEdge(1,2);\n\n\tg.indexEdges();\n\n\tChibaNishizekiTriangleEdgeScore counter(g);\n\tcounter.run();\n\tstd::vector<count> counts = counter.scores();\n\n\tEXPECT_EQ(1, (counts[g.edgeId(0,1)])) << \"wrong triangle count\";\n\tEXPECT_EQ(1, (counts[g.edgeId(0,2)])) << \"wrong triangle count\";\n\tEXPECT_EQ(1, (counts[g.edgeId(1,2)])) << \"wrong triangle count\";\n}\n\nTEST_F(ChibaNishizekiTriangleEdgeScoreGTest, testNewTriangleCountsTrivial) {\n\tGraph g(5);\n\n\tg.addEdge(0,1);\n\tg.addEdge(0,2);\n\tg.addEdge(1,2);\n\n\tg.indexEdges();\n\n\tTriangleEdgeScore counter(g);\n\tcounter.run();\n\tstd::vector<count> counts = counter.scores();\n\n\tEXPECT_EQ(1, (counts[g.edgeId(0,1)])) << \"wrong triangle count\";\n\tEXPECT_EQ(1, 
(counts[g.edgeId(0,2)])) << \"wrong triangle count\";\n\tEXPECT_EQ(1, (counts[g.edgeId(1,2)])) << \"wrong triangle count\";\n\t//EXPECT_EQ(0, (counts[g.edgeId(2,3)])) << \"wrong triangle count\";\n\t//TODO: edge ids for non-existing edges currently result in unexpected behaviour.\n}\n\n\nTEST_F(ChibaNishizekiTriangleEdgeScoreGTest, testTriangleCountsSimple) {\n\tint64_t n = 6;\n\tGraph g(n);\n\n\tg.addEdge(0,1);\n\tg.addEdge(0,2);\n\tg.addEdge(1,2);\n\n\tg.addEdge(0,4);\n\tg.addEdge(0,3);\n\tg.addEdge(3,4);\n\n\tg.addEdge(0,5);\n\tg.addEdge(4,5);\n\n\tg.indexEdges();\n\n\tEXPECT_EQ(8, g.numberOfEdges()) << \"wrong edge count\";\n\n\tChibaNishizekiTriangleEdgeScore counter(g);\n\tcounter.run();\n\tstd::vector<count> counts = counter.scores();\n\n\tEXPECT_EQ(6, g.numberOfNodes()) << \"undesired side effect\";\n\tEXPECT_EQ(8, g.numberOfEdges()) << \"undesired side effect\";\n\n\tEXPECT_EQ(8, counts.size()) << \"wrong triangle count map size\";\n\tEXPECT_EQ(1, (counts[g.edgeId(0,1)])) << \"wrong triangle count\";\n\tEXPECT_EQ(1, (counts[g.edgeId(0,2)])) << \"wrong triangle count\";\n\tEXPECT_EQ(1, (counts[g.edgeId(1,2)])) << \"wrong triangle count\";\n\n\tEXPECT_EQ(1, (counts[g.edgeId(0,3)])) << \"wrong triangle count\";\n\tEXPECT_EQ(1, (counts[g.edgeId(3,4)])) << \"wrong triangle count\";\n\n\tEXPECT_EQ(2, (counts[g.edgeId(0,4)])) << \"wrong triangle count\";\n\tEXPECT_EQ(1, (counts[g.edgeId(0,5)])) << \"wrong triangle count\";\n\tEXPECT_EQ(1, (counts[g.edgeId(4,5)])) << \"wrong triangle count\";\n\n\tEXPECT_EQ(1, (counts[g.edgeId(1,0)])) << \"wrong triangle count\";\n\tEXPECT_EQ(1, (counts[g.edgeId(2,0)])) << \"wrong triangle count\";\n\tEXPECT_EQ(1, (counts[g.edgeId(2,1)])) << \"wrong triangle count\";\n\n\tEXPECT_EQ(1, (counts[g.edgeId(3,0)])) << \"wrong triangle count\";\n\tEXPECT_EQ(1, (counts[g.edgeId(4,3)])) << \"wrong triangle count\";\n\n\tEXPECT_EQ(2, (counts[g.edgeId(4,0)])) << \"wrong triangle count\";\n\tEXPECT_EQ(1, (counts[g.edgeId(5,0)])) << \"wrong triangle count\";\n\tEXPECT_EQ(1, (counts[g.edgeId(5,4)])) << \"wrong triangle count\";\n}\n\nTEST_F(ChibaNishizekiTriangleEdgeScoreGTest, testNewTriangleCountsSimple) {\n\tint64_t n = 6;\n\tGraph g(n);\n\n\tg.addEdge(0,1);\n\tg.addEdge(0,2);\n\tg.addEdge(1,2);\n\n\tg.addEdge(0,4);\n\tg.addEdge(0,3);\n\tg.addEdge(3,4);\n\n\tg.addEdge(0,5);\n\tg.addEdge(4,5);\n\n\tg.indexEdges();\n\n\tEXPECT_EQ(8, g.numberOfEdges()) << \"wrong edge count\";\n\n\tTriangleEdgeScore counter(g);\n\tcounter.run();\n\tstd::vector<count> counts = counter.scores();\n\n\tEXPECT_EQ(6, g.numberOfNodes()) << \"undesired side effect\";\n\tEXPECT_EQ(8, g.numberOfEdges()) << \"undesired side effect\";\n\n\tEXPECT_EQ(8, counts.size()) << \"wrong triangle count map size\";\n\tEXPECT_EQ(1, (counts[g.edgeId(0,1)])) << \"wrong triangle count\";\n\tEXPECT_EQ(1, (counts[g.edgeId(0,2)])) << \"wrong triangle count\";\n\tEXPECT_EQ(1, (counts[g.edgeId(1,2)])) << \"wrong triangle count\";\n\n\tEXPECT_EQ(1, (counts[g.edgeId(0,3)])) << \"wrong triangle count\";\n\tEXPECT_EQ(1, (counts[g.edgeId(3,4)])) << \"wrong triangle count\";\n\n\tEXPECT_EQ(2, (counts[g.edgeId(0,4)])) << \"wrong triangle count\";\n\tEXPECT_EQ(1, (counts[g.edgeId(0,5)])) << \"wrong triangle count\";\n\tEXPECT_EQ(1, (counts[g.edgeId(4,5)])) << \"wrong triangle count\";\n\n\tEXPECT_EQ(1, (counts[g.edgeId(1,0)])) << \"wrong triangle count\";\n\tEXPECT_EQ(1, (counts[g.edgeId(2,0)])) << \"wrong triangle count\";\n\tEXPECT_EQ(1, (counts[g.edgeId(2,1)])) << \"wrong triangle count\";\n\n\tEXPECT_EQ(1, 
(counts[g.edgeId(3,0)])) << \"wrong triangle count\";\n\tEXPECT_EQ(1, (counts[g.edgeId(4,3)])) << \"wrong triangle count\";\n\n\tEXPECT_EQ(2, (counts[g.edgeId(4,0)])) << \"wrong triangle count\";\n\tEXPECT_EQ(1, (counts[g.edgeId(5,0)])) << \"wrong triangle count\";\n\tEXPECT_EQ(1, (counts[g.edgeId(5,4)])) << \"wrong triangle count\";\n}\n\n\n}\n/* namespace NetworKit */\n\n#endif /*NOGTEST */\n" }, { "alpha_fraction": 0.6523174047470093, "alphanum_fraction": 0.6607441902160645, "avg_line_length": 29.35284996032715, "blob_id": "900d3fe8d69300fddc7bd5af1d6438acf76ffb9a", "content_id": "ea8d092a1ac37a11ce93a11beafb049023a09faf", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 19699, "license_type": "permissive", "max_line_length": 248, "num_lines": 649, "path": "/networkit/cpp/numerics/LAMG/MultiLevelSetup.cpp", "repo_name": "networkitproject/networkit-mirror", "src_encoding": "UTF-8", "text": "/*\n * MultiLevelSetup.cpp\n *\n * Created on: 10.01.2015\n * Author: Michael Wegner ([email protected])\n */\n\n#include \"MultiLevelSetup.h\"\n#include \"Level/LevelElimination.h\"\n#include \"Level/EliminationStage.h\"\n#include \"LAMGSettings.h\"\n#include \"../../auxiliary/StringTools.h\"\n#include \"../../auxiliary/Enforce.h\"\n#include \"../../auxiliary/Timer.h\"\n#include \"../../algebraic/CSRMatrix.h\"\n\n#include <fstream>\n#include <iostream>\n#include <sstream>\n#include <cstdio>\n#include <set>\n#include \"omp.h\"\n\nnamespace NetworKit {\n\n#ifndef NDEBUG\ncount MultiLevelSetup::eliminationTime = 0;\ncount MultiLevelSetup::schurComplementTime = 0;\ncount MultiLevelSetup::aggregationTime = 0;\n#endif\n\nMultiLevelSetup::MultiLevelSetup(const Smoother &smoother) : smoother(smoother) {\n}\n\nvoid MultiLevelSetup::setup(const Graph &G, LevelHierarchy &hierarchy) const {\n\tsetup(CSRMatrix::graphLaplacian(G), hierarchy);\n}\n\nvoid MultiLevelSetup::setup(const CSRMatrix &matrix, LevelHierarchy &hierarchy) const {\n\tCSRMatrix A = matrix;\n\thierarchy.addFinestLevel(A);\n#ifndef NDEBUG\n\tDEBUG(\"FINEST\\t\", matrix.numberOfRows(), \"\\t\", matrix.nnz());\n#endif\n\n\tbool doneCoarsening = false;\n\tcount numTVs = TV_NUM;\n\tindex level = 0;\n\tA.sort();\n\twhile (!doneCoarsening) {\n\t\t// ELIMINATION\n\t\tif (coarseningElimination(A, hierarchy)) {\n\t\t\tif (!canCoarsen(A)) doneCoarsening = true;\n\t\t\tlevel++;\n#ifndef NDEBUG\n\t\t\tDEBUG(level, \" ELIM\\t\\t\", A.numberOfRows(), \"\\t\", A.nnz() / 2);\n#endif\n\t\t}\n\n\t\t// AGGREGATION\n\t\tVector tv;\n\t\tif (doneCoarsening || isRelaxationFast(A, level, tv)) {\n\t\t\tdoneCoarsening = true;\n\t\t} else {\n\t\t\tcoarseningAggregation(A, hierarchy, tv, numTVs);\n\t\t\tlevel++;\n#ifndef NDEBUG\n\t\t\tDEBUG(level, \" AGG\\t\\t\", A.numberOfRows(), \"\\t\", A.nnz() / 2);\n#endif\n\t\t\tif (numTVs < TV_MAX) {\n\t\t\t\tnumTVs += TV_INC;\n\t\t\t}\n\t\t}\n\n\t\tif (!canCoarsen(A)) doneCoarsening = true;\n\t}\n\n\thierarchy.setLastAsCoarsest();\n\n#ifndef NDEBUG\n\tDEBUG(\"Elimination: \", eliminationTime);\n\tDEBUG(\"Schur: \", schurComplementTime);\n\tDEBUG(\"Aggregation: \", aggregationTime);\n#endif\n}\n\nbool MultiLevelSetup::coarseningElimination(CSRMatrix &matrix, LevelHierarchy &hierarchy) const {\n#ifndef NDEBUG\n\tAux::Timer elimTimer;\n\tAux::Timer schurTimer;\n\telimTimer.start();\n#endif\n\tstd::vector<EliminationStage> coarseningStages;\n\tcount stageNum = 0;\n\twhile (stageNum < SETUP_ELIMINATION_MAX_STAGES) {\n\t\tif (matrix.numberOfRows() <= 
MAX_DIRECT_SOLVE_SIZE) break; // we do not need to coarsen the matrix any further\n\t\tstd::vector<bool> fNode;\n\t\tcount nf = lowDegreeSweep(matrix, fNode, stageNum);\n\t\tcount nc = matrix.numberOfRows() - nf;\n\n\t\tif (nc == 0) { // do not eliminate all nodes -> leave one entry in c\n\t\t\tnc = 1;\n\t\t\tnf--;\n\t\t}\n\n\t\t// add f nodes to fSet and c nodes to cSet\n\t\tstd::vector<index> fSet(nf);\n\t\tstd::vector<index> cSet(nc);\n\n\t\tstd::vector<index> coarseIndex(matrix.numberOfRows());\n\t\tcount numFNodes = 0;\n\t\tfor (index i = 0, fIndex = 0, cIndex = 0; i < matrix.numberOfRows(); ++i) {\n\t\t\tif (fNode[i] && fIndex < nf) {\n\t\t\t\tcoarseIndex[i] = fIndex;\n\t\t\t\tfSet[fIndex++] = i;\n\t\t\t\tnumFNodes++;\n\t\t\t} else {\n\t\t\t\tcoarseIndex[i] = cIndex;\n\t\t\t\tcSet[cIndex++] = i;\n\t\t\t}\n\t\t}\n\n\t\tif (nf <= SETUP_ELIMINATION_MIN_ELIM_FRACTION * matrix.numberOfRows()) {\n\t\t\tbreak;\n\t\t}\n\n\t\tCSRMatrix P;\n\t\tVector q;\n\t\teliminationOperators(matrix, fSet, coarseIndex, P, q);\n\t\tcoarseningStages.push_back(EliminationStage(P, q, fSet, cSet));\n\n#ifndef NDEBUG\n\t\tschurTimer.start();\n#endif\n\n\t\tCSRMatrix Acc = matrix.subMatrix(cSet, cSet); // Schur complement\n\t\tCSRMatrix Acf = matrix.subMatrix(cSet, fSet); // Schur complement\n\n\t\tmatrix = Acc + Acf * P;\n\n#ifndef NDEBUG\n\t\tschurTimer.stop();\n\t\tschurComplementTime += schurTimer.elapsedMilliseconds();\n#endif\n\n\t\tstageNum++;\n\t}\n\n\tif (stageNum != 0) { // we have coarsened the matrix\n\t\thierarchy.addEliminationLevel(matrix, coarseningStages);\n#ifndef NDEBUG\n\t\telimTimer.stop();\n\t\teliminationTime += elimTimer.elapsedMilliseconds();\n\t\t//schurComplementTime += schurTimer.elapsedMilliseconds();\n#endif\n\t\treturn true;\n\t}\n#ifndef NDEBUG\n\telimTimer.stop();\n\teliminationTime += elimTimer.elapsedMilliseconds();\n\t//schurComplementTime += schurTimer.elapsedMilliseconds();\n#endif\n\n\treturn false;\n}\n\ncount MultiLevelSetup::lowDegreeSweep(const CSRMatrix &matrix, std::vector<bool> &fNode, index stage) const {\n\tfNode.resize(matrix.numberOfRows(), true); // first mark all nodes as f nodes\n\tcount numFNodes = 0;\n\tint degreeOffset = stage != 0;\n\n\tfor (index i = 0; i < matrix.numberOfRows(); ++i) {\n\t\tif ((int) matrix.nnzInRow(i) - degreeOffset <= (int)SETUP_ELIMINATION_MAX_DEGREE && fNode[i]) { // node i has degree <= 4 and can be eliminated\n\t\t\tnumFNodes++;\n\t\t\tmatrix.forNonZeroElementsInRow(i, [&](index j, edgeweight w){ // to maintain independence, mark all neighbors as not eliminated\n\t\t\t\tif (j != i)\t{ // all neighbors of this f node are c nodes\n\t\t\t\t\tfNode[j] = false;\n\t\t\t\t}\n\t\t\t});\n\t\t} else { // node has high degree, thus it is a c node\n\t\t\tfNode[i] = false;\n\t\t}\n\t}\n\n\treturn numFNodes;\n}\n\nvoid MultiLevelSetup::eliminationOperators(const CSRMatrix &matrix, const std::vector<index> &fSet, const std::vector<index> &coarseIndex, CSRMatrix &P, Vector &q) const {\n\tstd::vector<CSRMatrix::Triple> triples;\n\tq = Vector(fSet.size());\n\tfor (index k = 0; k < fSet.size(); ++k) { // Afc\n\t\tmatrix.forNonZeroElementsInRow(fSet[k], [&](index j, edgeweight w){\n\t\t\tif (fSet[k] == j) {\n\t\t\t\tq[k] = 1.0 / w;\n\t\t\t} else {\n\t\t\t\ttriples.push_back({k, coarseIndex[j], w});\n\t\t\t}\n\t\t});\n\t}\n\n\tfor (index i = 0; i < triples.size(); ++i) { // * -Aff^-1\n\t\ttriples[i].value *= -q[triples[i].row];\n\t}\n\n\tP = CSRMatrix(fSet.size(), coarseIndex.size() - fSet.size(), triples, matrix.sorted());\n}\n\nvoid 
MultiLevelSetup::subMatrix(const CSRMatrix &matrix, const std::vector<index> &rows, const std::vector<index> &columns, const std::vector<index> &coarseIndex, CSRMatrix &result) const {\n\tstd::vector<CSRMatrix::Triple> triples;\n\n\tfor (index k = 0; k < rows.size(); ++k) {\n\t\tmatrix.forNonZeroElementsInRow(rows[k], [&](index j, edgeweight value) {\n\t\t\tif (coarseIndex[j] < columns.size() && columns[coarseIndex[j]] == j) { // check if neighbor is in columns\n\t\t\t\ttriples.push_back({k, coarseIndex[j], value});\n\t\t\t}\n\t\t});\n\t}\n\n\tresult = CSRMatrix(rows.size(), columns.size(), triples, matrix.sorted());\n}\n\nvoid MultiLevelSetup::coarseningAggregation(CSRMatrix &matrix, LevelHierarchy &hierarchy, Vector &tv, count numTVVectors) const {\n#ifndef NDEBUG\n\tAux::Timer aggTimer;\n\taggTimer.start();\n#endif\n\tVector B(SETUP_MAX_AGGREGATION_STAGES, std::numeric_limits<double>::max());\n\tstd::vector<std::vector<index>> S(SETUP_MAX_AGGREGATION_STAGES, std::vector<index>(matrix.numberOfRows(), std::numeric_limits<index>::max()));\n\tstd::vector<index> status(matrix.numberOfRows(), UNDECIDED);\n\tstd::vector<count> nc(SETUP_MAX_AGGREGATION_STAGES, matrix.numberOfRows());\n\n\tdouble alpha = 1.0;\n\tdouble maxCoarseningRatio = SETUP_COARSENING_WORK_GUARD / SETUP_CYCLE_INDEX;\n\tcount stage = 0;\n\tcount nC = matrix.numberOfRows();\n\n\n\t// generate TVs\n\tstd::vector<Vector> tVs = generateTVs(matrix, tv, numTVVectors);\n\n\t// compute strong adjacency matrix\n\tCSRMatrix Wstrong;\n\tcomputeStrongAdjacencyMatrix(matrix, Wstrong);\n\n\t// compute affinityMatrix\n\tCSRMatrix affinityMatrix;\n\tcomputeAffinityMatrix(Wstrong, tVs, affinityMatrix);\n\n\t// mark all locally high-degree nodes as seeds\n\taddHighDegreeSeedNodes(matrix, status);\n\n\t// aggregate all loose nodes\n\taggregateLooseNodes(Wstrong, status, nC);\n\n\tnc[0] = nC;\n\twhile (stage < SETUP_MIN_AGGREGATION_STAGES || (alpha >= maxCoarseningRatio && stage < SETUP_MAX_AGGREGATION_STAGES)) {\n\t\tnC = stage > 0? nc[stage - 1] : nc[0];\n\n\t\t// aggregation stage\n\t\taggregationStage(matrix, nC, Wstrong, affinityMatrix, tVs, status);\n\n\t\talpha = (double) nC / (double) matrix.numberOfRows();\n\t\talpha <= maxCoarseningRatio? 
B[stage] = 1.0-alpha : B[stage] = 1.0+alpha;\n\n\t\tS[stage] = status;\n\t\tnc[stage] = nC;\n\t\tstage++;\n\t}\n\n\tdouble min = B[0];\n\tindex bestAggregate = 0;\n\tfor (index i = 1; i < stage; ++i) {\n\t\tif (B[i] < min) {\n\t\t\tbestAggregate = i;\n\t\t\tmin = B[i];\n\t\t}\n\t}\n\n\tfor (index i = 0; i < matrix.numberOfRows(); ++i) {\n\t\tif (S[bestAggregate][i] == UNDECIDED) { // undecided nodes become their own seeds\n\t\t\tS[bestAggregate][i] = i;\n\t\t}\n\t}\n\n\tstd::vector<index> indexFine(matrix.numberOfRows(), 0);\n\tindex newIndex = 0;\n\tfor (index i = 0; i < matrix.numberOfRows(); ++i) {\n\t\tif (S[bestAggregate][i] == i) {\n\t\t\tindexFine[i] = newIndex++;\n\t\t}\n\t}\n\n\tfor (index i = 0; i < matrix.numberOfRows(); ++i) {\n\t\tstatus[i] = indexFine[S[bestAggregate][i]];\n\t}\n\n\tassert(newIndex == nc[bestAggregate]);\n\n\t// create interpolation matrix\n\tstd::vector<CSRMatrix::Triple> pTriples(matrix.numberOfRows());\n\tstd::vector<CSRMatrix::Triple> rTriples(matrix.numberOfRows());\n\tstd::vector<index> PColIndex(matrix.numberOfRows());\n\tstd::vector<std::vector<index>> PRowIndex(nc[bestAggregate]);\n\n\tfor (index i = 0; i < matrix.numberOfRows(); ++i) {\n\t\tpTriples[i] = {i, status[i], 1};\n\t\trTriples[i] = {status[i], i, 1};\n\t\tPColIndex[i] = status[i];\n\t\tPRowIndex[status[i]].push_back(i);\n\t}\n\n\tCSRMatrix P(matrix.numberOfRows(), nc[bestAggregate], pTriples, matrix.sorted());\n\tCSRMatrix R(nc[bestAggregate], matrix.numberOfRows(), rTriples, matrix.sorted());\n\n\t// create coarsened Laplacian\n\tgalerkinOperator(P, matrix, PColIndex, PRowIndex, matrix);\n\n\thierarchy.addAggregationLevel(matrix, P, R);\n\n#ifndef NDEBUG\n\taggTimer.stop();\n\taggregationTime += aggTimer.elapsedMilliseconds();\n#endif\n}\n\nstd::vector<Vector> MultiLevelSetup::generateTVs(const CSRMatrix &matrix, Vector &tv, count numVectors) const {\n\tstd::vector<Vector> testVectors(numVectors, Vector(matrix.numberOfColumns()));\n\n\ttestVectors[0] = tv;\n\n\tif (numVectors > 1) {\n\t\tVector b(matrix.numberOfColumns(), 0.0);\n#pragma omp parallel for\n\t\tfor (count i = 1; i < numVectors; ++i) {\n\t\t\tfor (count j = 0; j < matrix.numberOfColumns(); ++j) {\n\t\t\t\ttestVectors[i][j] = 2 * Aux::Random::probability() - 1;\n\t\t\t}\n\n\t\t\ttestVectors[i] = smoother.relax(matrix, b, testVectors[i], SETUP_TV_SWEEPS);\n\t\t}\n\t}\n\n\treturn testVectors;\n}\n\nvoid MultiLevelSetup::addHighDegreeSeedNodes(const CSRMatrix &matrix, std::vector<index> &status) const {\n\tstd::vector<count> deg(matrix.numberOfRows());\n#pragma omp parallel for\n\tfor (index i = 0; i < matrix.numberOfRows(); ++i) {\n\t\tdeg[i] = matrix.nnzInRow(i) - 1;\n\t}\n\n#pragma omp parallel for\n\tfor (index i = 0; i < matrix.numberOfRows(); ++i) {\n\t\tdouble num = 0.0;\n\t\tdouble denom = 0.0;\n\t\tmatrix.forNonZeroElementsInRow(i, [&](index j, double value){\n\t\t\tif (i != j) {\n\t\t\t\tnum += std::abs(value) * (double) deg[j];\n\t\t\t} else {\n\t\t\t\tdenom = std::abs(value);\n\t\t\t}\n\t\t});\n\n\n\t\tif ((double) deg[i] >= SETUP_AGGREGATION_DEGREE_THRESHOLD * (num / denom)) { // high degree node becomes seed\n\t\t\tstatus[i] = i;\n\t\t}\n\t}\n}\n\nvoid MultiLevelSetup::aggregateLooseNodes(const CSRMatrix &strongAdjMatrix, std::vector<index> &status, count &nc) const {\n\tstd::vector<index> looseNodes;\n\tfor (index i = 0; i < strongAdjMatrix.numberOfRows(); ++i) {\n\t\tdouble max = std::numeric_limits<double>::min();\n\t\tstrongAdjMatrix.forNonZeroElementsInRow(i, [&](index j, double value) {\n\t\t\tif (value > max) 
max = value;\n\t\t});\n\n\t\tif (std::abs(max) < 1e-9 || max == std::numeric_limits<double>::min()) {\n\t\t\tlooseNodes.push_back(i);\n\t\t}\n\t}\n\n\tif (looseNodes.size() > 0) {\n\t\tstatus[looseNodes[0]] = looseNodes[0]; // mark first as seed\n\t\tfor (index k = 1; k < looseNodes.size(); ++k) {\n\t\t\tstatus[looseNodes[k]] = looseNodes[0]; // first loose nodes becomes seed\n\t\t}\n\n\t\tnc -= looseNodes.size() - 1;\n\t}\n}\n\nvoid MultiLevelSetup::computeStrongAdjacencyMatrix(const CSRMatrix &matrix, CSRMatrix &strongAdjMatrix) const {\n\tstd::vector<double> maxNeighbor(matrix.numberOfRows(), std::numeric_limits<double>::min());\n#pragma omp parallel for\n\tfor (index i = 0; i < matrix.numberOfRows(); ++i) {\n\t\tmatrix.forNonZeroElementsInRow(i, [&](index j, double value) {\n\t\t\tif (i != j && -value > maxNeighbor[i]) {\n\t\t\t\tmaxNeighbor[i] = -value;\n\t\t\t}\n\t\t});\n\t}\n\n\tstd::vector<index> rowIdx(matrix.numberOfRows()+1, 0);\n\tmatrix.parallelForNonZeroElementsInRowOrder([&](index i, index j, double value) {\n\t\tif (i != j && std::abs(value) >= 0.1 * std::min(maxNeighbor[i], maxNeighbor[j])) {\n\t\t\t++rowIdx[i+1];\n\t\t}\n\t});\n\n\tfor (index i = 0; i < matrix.numberOfRows(); ++i) {\n\t\trowIdx[i+1] += rowIdx[i];\n\t}\n\n\tcount nnz = rowIdx[matrix.numberOfRows()];\n\tstd::vector<index> columnIdx(nnz);\n\tstd::vector<double> nonZeros(nnz);\n\n#pragma omp parallel for\n\tfor (index i = 0; i < matrix.numberOfRows(); ++i) {\n\t\tindex cIdx = rowIdx[i];\n\t\tmatrix.forNonZeroElementsInRow(i, [&](index j, double value) {\n\t\t\tif (i != j && std::abs(value) >= 0.1 * std::min(maxNeighbor[i], maxNeighbor[j])) {\n\t\t\t\tcolumnIdx[cIdx] = j;\n\t\t\t\tnonZeros[cIdx] = -value;\n\t\t\t\t++cIdx;\n\t\t\t}\n\t\t});\n\t}\n\n\tstrongAdjMatrix = CSRMatrix(matrix.numberOfRows(), matrix.numberOfColumns(), rowIdx, columnIdx, nonZeros, matrix.sorted());\n}\n\nvoid MultiLevelSetup::computeAffinityMatrix(const CSRMatrix &matrix, const std::vector<Vector> &tVs, CSRMatrix &affinityMatrix) const {\n\tassert(tVs.size() > 0);\n\n\tstd::vector<index> rowIdx(matrix.numberOfRows()+1);\n\tstd::vector<index> columnIdx(matrix.nnz());\n\tstd::vector<double> nonZeros(matrix.nnz());\n\n#pragma omp parallel for\n\tfor (index i = 0; i < matrix.numberOfRows(); ++i) {\n\t\trowIdx[i+1] = matrix.nnzInRow(i);\n\t}\n\n\tfor (index i = 0; i < matrix.numberOfRows(); ++i) {\n\t\trowIdx[i+1] += rowIdx[i];\n\t}\n\n\tstd::vector<double> normSquared(matrix.numberOfRows(), 0.0);\n#pragma omp parallel for\n\tfor (index i = 0; i < matrix.numberOfRows(); ++i) {\n\t\tfor (index k = 0; k < tVs.size(); ++k) {\n\t\t\tnormSquared[i] += tVs[k][i] * tVs[k][i];\n\t\t}\n\t}\n\n#pragma omp parallel for\n\tfor (index i = 0; i < matrix.numberOfRows(); ++i) {\n\t\tdouble nir = 1.0 / normSquared[i];\n\t\tindex cIdx = rowIdx[i];\n\t\tmatrix.forNonZeroElementsInRow(i, [&](index j, double val) {\n\t\t\tdouble ij = 0.0;\n\t\t\tfor (index k = 0; k < tVs.size(); ++k) {\n\t\t\t\tij += tVs[k][i] * tVs[k][j];\n\t\t\t}\n\n\t\t\tdouble value = (ij * ij) * nir / normSquared[j];\n\t\t\tcolumnIdx[cIdx] = j;\n\t\t\tnonZeros[cIdx] = value;\n\t\t\t++cIdx;\n\t\t});\n\t}\n\n\taffinityMatrix = CSRMatrix(matrix.numberOfRows(), matrix.numberOfColumns(), rowIdx, columnIdx, nonZeros, matrix.sorted());\n}\n\nvoid MultiLevelSetup::aggregationStage(const CSRMatrix &matrix, count &nc, const CSRMatrix &strongAdjMatrix, const CSRMatrix &affinityMatrix, std::vector<Vector> &tVs, std::vector<index> &status) const {\n\tstd::vector<std::vector<index>> 
bins(10);\n\tcomputeStrongNeighbors(affinityMatrix, status, bins);\n\n\tstd::vector<double> diag(matrix.numberOfRows(), 0.0);\n#pragma omp parallel for\n\tfor (index i = 0 ; i < matrix.numberOfRows(); ++i) {\n\t\tdiag[i] = matrix(i,i);\n\t}\n\n\tfor (index k = bins.size(); k-- > 0;) { // iterate over undecided nodes with strong neighbors in decreasing order of strongest neighbor\n\t\tfor (index i : bins[k]) {\n\t\t\tif (status[i] == UNDECIDED) { // node is still undecided\n\t\t\t\tindex s = 0;\n\t\t\t\tif (findBestSeedEnergyCorrected(strongAdjMatrix, affinityMatrix, diag, tVs, status, i, s)) {\n\t\t\t\t\tstatus[s] = s; // s becomes seed\n\t\t\t\t\tstatus[i] = s; // i's seed is s\n\t\t\t\t\tnc--;\n\n\t\t\t\t\tfor (index j = 0; j < tVs.size(); ++j) { // update test vectors\n\t\t\t\t\t\ttVs[j][i] = tVs[j][s];\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tif (nc <= matrix.numberOfRows() * SETUP_COARSENING_WORK_GUARD / SETUP_CYCLE_INDEX) {\n\t\t\tbreak;\n\t\t}\n\t} // iterate over bins\n}\n\nvoid MultiLevelSetup::computeStrongNeighbors(const CSRMatrix &affinityMatrix, const std::vector<index> &status, std::vector<std::vector<index>> &bins) const {\n\tstd::vector<bool> undecided(affinityMatrix.numberOfRows(), false);\n\tstd::vector<double> maxNeighbor(affinityMatrix.numberOfRows(), std::numeric_limits<double>::min());\n\tdouble overallMax = 0.0;\n\tdouble overallMin = std::numeric_limits<double>::max();\n\n\taffinityMatrix.parallelForNonZeroElementsInRowOrder([&](index i, index j, double value) { // determine the highest affinity neighbor of each node\n\t\tif (status[i] == UNDECIDED && (status[j] == UNDECIDED || status[j] == j)) { // i is UNDECIDED and its neighbor j is also UNDECIDED or SEED\n\t\t\tif (value > maxNeighbor[i]) {\n\t\t\t\tmaxNeighbor[i] = value;\n\t\t\t}\n\t\t\tundecided[i] = true;\n\t\t}\n\t});\n\n\tfor (index i = 0; i < affinityMatrix.numberOfRows(); ++i) {\n\t\tif (maxNeighbor[i] > overallMax) {\n\t\t\toverallMax = maxNeighbor[i];\n\t\t}\n\t\tif (maxNeighbor[i] < overallMin) {\n\t\t\toverallMin = maxNeighbor[i];\n\t\t}\n\t}\n\n\tdouble h = fabs(overallMax - overallMin) < 1e-15? 
1.0 : (double) bins.size() / (overallMax - overallMin);\n\tfor (index i = 0; i < affinityMatrix.numberOfRows(); ++i) {\n\t\tif (undecided[i]) { // undecided nodes with strong neighbors\n\t\t\tindex binIndex = (index) std::floor(h * (maxNeighbor[i] - overallMin));\n\t\t\tif (binIndex == bins.size()) { // last interval is closed on the right\n\t\t\t\tbinIndex--;\n\t\t\t}\n\n\t\t\tassert(binIndex >= 0 && binIndex < bins.size());\n\t\t\tbins[binIndex].push_back(i);\n\t\t}\n\t}\n}\n\nbool MultiLevelSetup::findBestSeedEnergyCorrected(const CSRMatrix &strongAdjMatrix, const CSRMatrix &affinityMatrix, const std::vector<double> &diag, const std::vector<Vector> &tVs, const std::vector<index> &status, const index u, index &s) const {\n\tbool foundSeed = false;\n\tstd::vector<double> r(tVs.size(), 0.0);\n\tstd::vector<double> q(tVs.size(), 0.0);\n\tstd::vector<double> E(tVs.size(), 0.0);\n\n\tdouble d = diag[u];\n\tdouble d2 = 0.5 * diag[u];\n\tfor (index k = 0; k < tVs.size(); ++k) {\n\t\tdouble rr = 0.0;\n\t\tdouble qq = 0.0;\n\t\tstrongAdjMatrix.forNonZeroElementsInRow(u, [&](index v, double value) {\n\t\t\trr += value * tVs[k][v];\n\t\t\tqq += value * 0.5 * tVs[k][v] * tVs[k][v];\n\t\t});\n\n\t\tr[k] = rr;\n\t\tq[k] = qq;\n\t\tdouble y = rr/d;\n\t\tE[k] = (d2*y - rr)*y + qq;\n\t}\n\n\tdouble maxNeighbor = -1.0;\n\taffinityMatrix.forNonZeroElementsInRow(u, [&](index v, double value) {\n\t\tif (status[v] == UNDECIDED || status[v] == v) {\n\t\t\tdouble maxMu = -1.0;\n\t\t\tbool smallRatio = true;\n\t\t\tfor (index k = 0; k < tVs.size(); ++k) {\n\t\t\t\tdouble xv = tVs[k][v];\n\t\t\t\tdouble Ec = (d2*xv - r[k])*xv + q[k];\n\t\t\t\tdouble mu = Ec / (E[k] + 1e-15);\n\n\t\t\t\tif (mu > maxMu) {\n\t\t\t\t\tmaxMu = mu;\n\t\t\t\t}\n\t\t\t\tif (maxMu > 2.5) {\n\t\t\t\t\tsmallRatio = false;\n\t\t\t\t\tbreak;\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif (smallRatio && value > maxNeighbor) {\n\t\t\t\tmaxNeighbor = value;\n\t\t\t\ts = v;\n\t\t\t\tfoundSeed = true;\n\t\t\t}\n\t\t}\n\t});\n\n\treturn foundSeed;\n}\n\nbool MultiLevelSetup::canCoarsen(const CSRMatrix &A) const {\n\treturn A.numberOfRows() > MAX_DIRECT_SOLVE_SIZE;\n}\n\nbool MultiLevelSetup::isRelaxationFast(const CSRMatrix &A, index lvlIndex, Vector &tv) const {\n\tcount nu = SETUP_RELAX_ACF_MIN_SWEEPS + 2 * (lvlIndex - 1);\n\tcount tvNu = SETUP_TV_SWEEPS;\n\tcount initial = 3;\n\n\t// create testVector in [-1,1]\n\ttv = Vector(A.numberOfRows());\n\tfor (index i = 0; i < tv.getDimension(); ++i) {\n\t\ttv[i] = 2.0 * Aux::Random::probability() - 1.0;\n\t}\n\n\tVector b(A.numberOfRows(), 0.0);\n\tVector x = tv;\n\tx = smoother.relax(A, b, x, initial);\n\ttv = smoother.relax(A, b, x, tvNu - initial);\n\tVector y = smoother.relax(A, b, tv, nu - tvNu);\n\tdouble relaxAcf = std::pow((y - y.mean()).length() / (x - x.mean()).length(), (double) 1.0 / (double) (nu - initial));\n\treturn relaxAcf <= SETUP_MAX_COARSE_RELAX_ACF || !canCoarsen(A);\n}\n\nvoid MultiLevelSetup::galerkinOperator(const CSRMatrix &P, const CSRMatrix &A, const std::vector<index> &PColIndex, const std::vector<std::vector<index>> &PRowIndex, CSRMatrix &B) const {\n\tstd::vector<CSRMatrix::Triple> triples;\n\tSparseAccumulator spa(P.numberOfColumns());\n\tfor (index i = 0; i < P.numberOfColumns(); ++i) {\n\t\tfor (index k : PRowIndex[i]) {\n\t\t\tdouble Pki = P(k,i);\n\t\t\tA.forNonZeroElementsInRow(k, [&](index l, double value) {\n\t\t\t\tindex j = PColIndex[l];\n\t\t\t\tspa.scatter(Pki * value * P(l, j), j);\n\t\t\t});\n\t\t}\n\n\t\tspa.gather([&](index i, index j, double value) 
{\n\t\t\ttriples.push_back({i,j,value});\n\t\t});\n\n\t\tspa.increaseRow();\n\t}\n\n\tB = CSRMatrix(P.numberOfColumns(), P.numberOfColumns(), triples, true);\n}\n\n\n} /* namespace NetworKit */\n" }, { "alpha_fraction": 0.597282886505127, "alphanum_fraction": 0.6482290029525757, "avg_line_length": 22.409090042114258, "blob_id": "ccaede635fce025246f2e49a975ea33e6ac9e61f", "content_id": "f29a59fba167374e062174266e21f66e6ad57fb2", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 2061, "license_type": "permissive", "max_line_length": 57, "num_lines": 88, "path": "/networkit/cpp/algebraic/test/IncidenceMatrixGTest.cpp", "repo_name": "networkitproject/networkit-mirror", "src_encoding": "UTF-8", "text": "/*\n * IncidenceMatrixGTest.cpp\n *\n * Created on: 01.04.2014\n * Author: Michael\n */\n\n#include \"IncidenceMatrixGTest.h\"\n\n\nnamespace NetworKit {\n\nIncidenceMatrixGTest::IncidenceMatrixGTest() {\n}\n\nIncidenceMatrixGTest::~IncidenceMatrixGTest() {\n}\n\nTEST_F(IncidenceMatrixGTest, testElementAccess) {\n\tIncidenceMatrix mat(graph);\n\tASSERT_EQ(graph.numberOfNodes(), mat.numberOfRows());\n\tASSERT_EQ(graph.numberOfEdges(), mat.numberOfColumns());\n\n\tEXPECT_EQ(sqrt(graph.weight(0,1)), mat(0,0));\n\tEXPECT_EQ(-sqrt(graph.weight(0,1)), mat(1,0));\n\tfor (uint64_t i = 2; i < mat.numberOfRows(); ++i) {\n\t\tEXPECT_EQ(0.0, mat(i, 0));\n\t}\n\n\tEXPECT_EQ(-sqrt(graph.weight(0,2)), mat(2,1));\n\n\tEXPECT_EQ(-sqrt(graph.weight(0,3)), mat(3,2));\n\tEXPECT_EQ(-sqrt(graph.weight(2,3)), mat(3,3));\n\n\tfor (uint64_t i = 0; i < mat.numberOfRows(); ++i) {\n\t\tEXPECT_EQ(0.0, mat(i, 5));\n\t}\n}\n\nTEST_F(IncidenceMatrixGTest, testRowAndColumnAccess) {\n\tIncidenceMatrix mat(graph);\n\n\tVector row0 = mat.row(0);\n\tASSERT_EQ(row0.getDimension(), mat.numberOfColumns());\n\n\tEXPECT_EQ(sqrt(graph.weight(0,1)), row0[0]);\n\tEXPECT_EQ(sqrt(graph.weight(0,2)), row0[1]);\n\tEXPECT_EQ(sqrt(graph.weight(0,3)), row0[2]);\n\tfor (uint64_t j = 3; j < row0.getDimension(); ++j) {\n\t\tEXPECT_EQ(0.0, row0[j]);\n\t}\n\n\tfor (uint64_t j = 0; j < 5; ++j) {\n\t\tVector column = mat.column(j);\n\t\tASSERT_EQ(column.getDimension(), mat.numberOfRows());\n\n\t\tdouble sum = 0.0;\n\t\tfor (uint64_t i = 0; i < column.getDimension(); ++i) {\n\t\t\tsum += column[i];\n\t\t}\n\n\t\tEXPECT_EQ(0.0, sum);\n\t}\n\n\tVector column5 = mat.column(5);\n\tASSERT_EQ(column5.getDimension(), mat.numberOfRows());\n\n\tfor (uint64_t i = 0; i < column5.getDimension(); ++i) {\n\t\tEXPECT_EQ(0.0, column5[i]);\n\t}\n}\n\nTEST_F(IncidenceMatrixGTest, testMatrixVectorProduct) {\n\tIncidenceMatrix mat(graph);\n\tVector v = {12, 3, 9, 28, 0, -1};\n\n\tVector result = mat * v;\n\tASSERT_EQ(result.getDimension(), mat.numberOfRows());\n\n\tEXPECT_EQ(69, result[0]);\n\tEXPECT_EQ(-24, result[1]);\n\tEXPECT_EQ(19, result[2]);\n\tEXPECT_EQ(-64, result[3]);\n\tEXPECT_EQ(0, result[4]);\n}\n\n\n} /* namespace NetworKit */\n\n" }, { "alpha_fraction": 0.7311851978302002, "alphanum_fraction": 0.7324244976043701, "avg_line_length": 40.67136001586914, "blob_id": "20593161c20b1a17b9ea9f820f395d912e2985b2", "content_id": "542bc5a41dc2ab847f4fc67abbbc5f5ba211d176", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 8878, "license_type": "permissive", "max_line_length": 231, "num_lines": 213, "path": "/networkit/cpp/numerics/LAMG/MultiLevelSetup.h", "repo_name": "networkitproject/networkit-mirror", "src_encoding": "UTF-8", "text": "/*\n * 
MultiLevelSetup.h\n *\n * Created on: 10.01.2015\n * Author: Michael Wegner ([email protected])\n */\n\n#ifndef MULTILEVELSETUP_H_\n#define MULTILEVELSETUP_H_\n\n#include \"LevelHierarchy.h\"\n#include \"../Smoother.h\"\n#include \"../../algebraic/CSRMatrix.h\"\n\n#include <limits>\n\nnamespace NetworKit {\n\n#define UNDECIDED std::numeric_limits<index>::max()\n\n/**\n * @ingroup\n * Implements the setup phase of LAMG (Lean Algebraic Multigrid by Livne et al.).\n */\nclass MultiLevelSetup {\n\nprivate:\n\tconst Smoother &smoother;\n#ifndef NDEBUG\n\tstatic count eliminationTime;\n\tstatic count schurComplementTime;\n\tstatic count aggregationTime;\n#endif\n\n\t/**\n\t * Elimination phase of LAMG for the specified Laplacian matrix @a matrix. If an elimination stage is created,\n\t * it is stored in the LevelHierarchy @a hierarchy and the method returns @code{true}. Otherwise, @code{false}\n\t * is returned.\n\t * @param matrix Laplacian matrix for which an elimination stage should be created.\n\t * @param hierarchy LevelHierarchy in which the created elimination stage is stored (if created)\n\t * @return @code{True} if an elimination stage has been created, otherwise @code{false}.\n\t */\n\tbool coarseningElimination(CSRMatrix &matrix, LevelHierarchy &hierarchy) const;\n\n\t/**\n\t * Scans the Laplacian matrix for nodes with a low degree (i.e. nodes with less than 5 neighbors). For each node\n\t * having low degree and independent from already found low degree nodes, @code{true} is stored in @a fNode. The\n\t * @a stage parameter specifies if we are in the first or subsequent stages during elimination.\n\t * @param matrix Laplacian matrix.\n\t * @param fNode[out] For each node, @code{true} if the node is of low degree and @code{false} otherwise.\n\t * @param stage The stage of the elimination phase.\n\t * @return Number of low degree nodes found.\n\t */\n\tcount lowDegreeSweep(const CSRMatrix &matrix, std::vector<bool> &fNode, index stage) const;\n\n\t/**\n\t * Computes the projection matrix @a P and the @a q vector used to restrict and interpolate the matrix for an\n\t * elimination stage.\n\t * @param matrix Laplacian matrix.\n\t * @param fSet Set of nodes having low degree.\n\t * @param coarseIndex Set of nodes equal to V \\setminus fSet\n\t * @param P[out] The projection matrix.\n\t * @param q[out] The q vector.\n\t */\n\tvoid eliminationOperators(const CSRMatrix &matrix, const std::vector<index> &fSet, const std::vector<index> &coarseIndex, CSRMatrix &P, Vector &q) const;\n\n\t/**\n\t * Computes the submatrix specified by @a rows and @a columns and stores the result in @a result. The @a coarseIndex\n\t * is used to efficiently check whether the value is part of the submatrix or not.\n\t * @param matrix Laplacian matrix.\n\t * @param rows Row indices to include in submatrix.\n\t * @param columns Column indices to include in submatrix.\n\t * @param coarseIndex\n\t * @param result[out] The resulting submatrix.\n\t */\n\tvoid subMatrix(const CSRMatrix &matrix, const std::vector<index> &rows, const std::vector<index> &columns, const std::vector<index> &coarseIndex, CSRMatrix &result) const;\n\n\n\t/**\n\t * Aggregation phase of LAMG for the specified Laplacian matrix @a matrix. The coarsened matrix is stored in the\n\t * LevelHierarchy @a hierarchy. The test vector @a tv is used for determining the affinity of nodes. 
The parameter\n\t * @a numTVVectors specifies how many test vectors will be created to determine the affinities between nodes.\n\t * @param matrix Laplacian matrix.\n\t * @param hierarchy LevelHierarchy to store the stage.\n\t * @param tv Test vector.\n\t * @param numTVVectors Number of test vectors to use.\n\t */\n\tvoid coarseningAggregation(CSRMatrix &matrix, LevelHierarchy &hierarchy, Vector &tv, count numTVVectors) const;\n\n\t/**\n\t * Create @a numVectors test vectors for the given Laplacian matrix @a matrix. The test vector @a tv will be\n\t * reused.\n\t * @param matrix Laplacian matrix.\n\t * @param tv Test vector.\n\t * @param numVectors Number of test vectors to create.\n\t * @return The created test vectors.\n\t */\n\tstd::vector<Vector> generateTVs(const CSRMatrix &matrix, Vector &tv, const count numVectors) const;\n\n\t/**\n\t * Adds high degree nodes as seeds to @a status.\n\t * @param matrix Laplacian matrix.\n\t * @param status[out] High degree nodes are flagged as seed.\n\t */\n\tvoid addHighDegreeSeedNodes(const CSRMatrix &matrix, std::vector<index> &status) const;\n\n\t/**\n\t * Aggregates loose nodes (nodes with low adjacency) together.\n\t * @param strongAdjMatrix Strong adjacency matrix.\n\t * @param status[out] Aggregates loose nodes together and labels them in @a status accordingly.\n\t * @param nc[out] The altered number of coarse nodes.\n\t */\n\tvoid aggregateLooseNodes(const CSRMatrix &strongAdjMatrix, std::vector<index> &status, count &nc) const;\n\n\t/**\n\t * Computes the strong adjacency matrix for the given Laplacian matrix @a matrix.\n\t * @param matrix Laplacian matrix.\n\t * @param strongAdjMatrix[out] The resulting strong adjacency matrix.\n\t */\n\tvoid computeStrongAdjacencyMatrix(const CSRMatrix &matrix, CSRMatrix &strongAdjMatrix) const;\n\n\t/**\n\t * Computes the affinity matrix for the given Laplacian matrix @a matrix and the test vectors @a tVs.\n\t * @param matrix Laplacian matrix.\n\t * @param tVs Test vectors.\n\t * @param affinityMatrix[out] The resulting affinity matrix.\n\t */\n\tvoid computeAffinityMatrix(const CSRMatrix &matrix, const std::vector<Vector> &tVs, CSRMatrix &affinityMatrix) const;\n\n\t/**\n\t * Models one stage in the aggregation phase. New aggregates are labeled accordingly in @a status.\n\t * @param matrix Laplacian matrix.\n\t * @param nc Number of coarse nodes.\n\t * @param strongAdjMatrix Strong adjacency matrix.\n\t * @param affinityMatrix Affinity matrix.\n\t * @param tVs[out] Test vectors.\n\t * @param status[out] Aggregation labels.\n\t */\n\tvoid aggregationStage(const CSRMatrix &matrix, count &nc, const CSRMatrix &strongAdjMatrix, const CSRMatrix &affinityMatrix, std::vector<Vector> &tVs, std::vector<index> &status) const;\n\n\t/**\n\t * Computes strong (cf. 
LAMG paper) neighbors and stores them sorted into @a bins.\n\t * @param affinityMatrix Affinity matrix.\n\t * @param status Aggregation labels.\n\t * @param bins[out] Strong neighbors sorted into bins.\n\t */\n\tvoid computeStrongNeighbors(const CSRMatrix &affinityMatrix, const std::vector<index> &status, std::vector<std::vector<index>> &bins) const;\n\n\t/**\n\t * Finds the best seed for node @a u and stores it in @a s.\n\t * @param strongAdjMatrix\n\t * @param affinityMatrix Affinity matrix.\n\t * @param diag Vector of diagonal entries of the Laplacian matrix.\n\t * @param tVs Test vectors.\n\t * @param status Aggregation labels.\n\t * @param u The node to find the best seed for.\n\t * @param s[out] The best seed for node @a u.\n\t * @return @code{True} if a seed has been found for @a u, @code{false} otherwise.\n\t */\n\tbool findBestSeedEnergyCorrected(const CSRMatrix &strongAdjMatrix, const CSRMatrix &affinityMatrix, const std::vector<double> &diag, const std::vector<Vector> &tVs, const std::vector<index> &status, const index u, index &s) const;\n\n\n\t/**\n\t * Determines if the Laplacian matrix @a A can be coarsened further.\n\t * @param A Laplacian matrix.\n\t * @return @code{True} if @a A can be coarsened further, @code{false} otherwise.\n\t */\n\tbool canCoarsen(const CSRMatrix &A) const;\n\n\t/**\n\t * Determines if the relaxation is fast enough to stop coarsening.\n\t * @param A Laplacian matrix.\n\t * @param lvlIndex The number of levels already created in the hierarchy.\n\t * @param tv Test vector.\n\t * @return @code{True} if convergence of relaxation is fast, @code{false} otherwise.\n\t */\n\tbool isRelaxationFast(const CSRMatrix &A, index lvlIndex, Vector &tv) const;\n\n\t/**\n\t * Computes the coarsened matrix of @a matrix by means of the projection matrix @a P and stores the result in @a B.\n\t * @param P Projection matrix.\n\t * @param A Laplacian matrix.\n\t * @param PColIndex Stores the column index of the 1 entry at each row.\n\t * @param PRowIndex Stores the row index of the 1 entry at each column.\n\t * @param B[out] Resulting coarsened Laplacian matrix.\n\t */\n\tvoid galerkinOperator(const CSRMatrix &P, const CSRMatrix &A, const std::vector<index> &PColIndex, const std::vector<std::vector<index>> &PRowIndex, CSRMatrix &B) const;\n\npublic:\n\t/**\n\t * Creates an instance of MultiLevelSetup with the specified @a smoother used for relaxing during the setup phase.\n\t * @param smoother Reference to smoother.\n\t */\n\tMultiLevelSetup(const Smoother &smoother);\n\n\t/**\n\t * Creates a @a hierarchy for the given Laplacian matrix of the graph @a G.\n\t * @param G The graph.\n\t * @param hierarchy[out] The constructed hierarchy.\n\t */\n\tvoid setup(const Graph &G, LevelHierarchy &hierarchy) const;\n\n\t/**\n\t * Creates a @a hierarchy for the given Laplacian matrix @a matrix.\n\t * @param matrix Laplacian matrix.\n\t * @param hierarchy[out] The constructed hierarchy.\n\t */\n\tvoid setup(const CSRMatrix &matrix, LevelHierarchy &hierarchy) const;\n};\n\n} /* namespace NetworKit */\n\n#endif /* MULTILEVELSETUP_H_ */\n" }, { "alpha_fraction": 0.7067669034004211, "alphanum_fraction": 0.7268170714378357, "avg_line_length": 16.34782600402832, "blob_id": "be13cdb112c3dd46c5c3a3cdc65fc4f6ef3ff776", "content_id": "d22b6ecdd40eede8b673b7048d8ad73e0f2f7667", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 399, "license_type": "permissive", "max_line_length": 53, "num_lines": 23, "path": 
"/networkit/cpp/centrality/test/ApproxBetweennessGTest.h", "repo_name": "networkitproject/networkit-mirror", "src_encoding": "UTF-8", "text": "/*\n * ApproxBetweennessGTest.h\n *\n * Created on: 30.06.2014\n * Author: moritzl\n */\n\n#ifndef APPROXBETWEENNESSGTEST_H_\n#define APPROXBETWEENNESSGTEST_H_\n\n#include <gtest/gtest.h>\n\nnamespace NetworKit {\n\nclass ApproxBetweennessGTest : public testing::Test {\npublic:\n\tApproxBetweennessGTest() = default;\n\tvirtual ~ApproxBetweennessGTest() = default;\n};\n\n}\n\n#endif /* APPROXBETWEENNESSGTEST_H_ */\n" }, { "alpha_fraction": 0.6201528310775757, "alphanum_fraction": 0.6288706660270691, "avg_line_length": 25.05457305908203, "blob_id": "daa7dbf154c185a67f39d7359a607830f90d8fdd", "content_id": "560748ec8c24cf0e15167d6f0fa52c53f9a3ee45", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 17665, "license_type": "permissive", "max_line_length": 290, "num_lines": 678, "path": "/networkit/cpp/algebraic/CSRMatrix.cpp", "repo_name": "networkitproject/networkit-mirror", "src_encoding": "UTF-8", "text": "/*\n * CSRMatrix.cpp\n *\n * Created on: May 6, 2015\n * Author: Michael\n */\n\n#include \"CSRMatrix.h\"\n\n#include <cassert>\n#include <atomic>\n#include \"omp.h\"\n\nnamespace NetworKit {\n\n/** Floating point epsilon to use in comparisons. */\nconstexpr double EPSILON = 1e-9;\n\nCSRMatrix::CSRMatrix() : rowIdx(0), columnIdx(0), nonZeros(0), nRows(0), nCols(0), isSorted(true) {\n}\n\nCSRMatrix::CSRMatrix(const count nRows, const count nCols, const std::vector<std::pair<index, index>> &positions, const std::vector<double> &values, bool isSorted) : nRows(nRows), nCols(nCols), isSorted(isSorted) {\n\tcount nnz = values.size();\n\trowIdx = std::vector<index>(nRows + 1, 0);\n\tcolumnIdx = std::vector<index>(nnz);\n\tnonZeros = std::vector<double>(nnz);\n\n\tfor (index i = 0; i < nnz; ++i) {\n\t\trowIdx[positions[i].first]++;\n\t}\n\n\tfor (index i = 0, prefixSum = 0; i < nRows; ++i) {\n\t\tcount nnzInRow = rowIdx[i];\n\t\trowIdx[i] = prefixSum;\n\t\tprefixSum += nnzInRow;\n\t}\n\trowIdx[nRows] = nnz;\n\n\tfor (index i = 0; i < nnz; ++i) {\n\t\tindex row = positions[i].first;\n\t\tindex dest = rowIdx[row];\n\n\t\tcolumnIdx[dest] = positions[i].second;\n\t\tnonZeros[dest] = values[i];\n\n\t\trowIdx[row]++;\n\t}\n\n\tfor (index i = 0, firstIdxOfRow = 0; i <= nRows; ++i) {\n\t\tindex newRow = rowIdx[i];\n\t\trowIdx[i] = firstIdxOfRow;\n\t\tfirstIdxOfRow = newRow;\n\t}\n}\n\nCSRMatrix::CSRMatrix(const count nRows, const count nCols, const std::vector<Triple> &triples, bool isSorted) : nRows(nRows), nCols(nCols), isSorted(isSorted) {\n\tcount nnz = triples.size();\n\trowIdx = std::vector<index>(nRows + 1, 0);\n\tcolumnIdx = std::vector<index>(nnz);\n\tnonZeros = std::vector<double>(nnz);\n\n\tfor (index i = 0; i < nnz; ++i) {\n\t\trowIdx[triples[i].row]++;\n\t}\n\n\tfor (index i = 0, prefixSum = 0; i < nRows; ++i) {\n\t\tcount nnzInRow = rowIdx[i];\n\t\trowIdx[i] = prefixSum;\n\t\tprefixSum += nnzInRow;\n\t}\n\trowIdx[nRows] = nnz;\n\n\tfor (index i = 0; i < nnz; ++i) {\n\t\tindex row = triples[i].row;\n\t\tindex dest = rowIdx[row];\n\n\t\tcolumnIdx[dest] = triples[i].column;\n\t\tnonZeros[dest] = triples[i].value;\n\n\t\trowIdx[row]++;\n\t}\n\n\tfor (index i = 0, firstIdxOfRow = 0; i <= nRows; ++i) {\n\t\tindex newRow = rowIdx[i];\n\t\trowIdx[i] = firstIdxOfRow;\n\t\tfirstIdxOfRow = newRow;\n\t}\n}\n\nCSRMatrix::CSRMatrix(const count nRows, const count nCols, const std::vector<std::vector<index>> 
&columnIdx, const std::vector<std::vector<double>> &values, bool isSorted) : nRows(nRows), nCols(nCols), isSorted(isSorted) {\n\t rowIdx = std::vector<index>(nRows + 1, 0);\n\t count nnz = columnIdx[0].size();\n\t for (index i = 1; i < columnIdx.size(); ++i) {\n\t\t rowIdx[i] = rowIdx[i-1] + columnIdx[i-1].size();\n\t\t nnz += columnIdx[i].size();\n\t }\n\t rowIdx[nRows] = nnz;\n\n\t this->columnIdx = std::vector<index>(nnz);\n\t this->nonZeros = std::vector<double>(nnz);\n\n#pragma omp parallel for\n\t for (index i = 0; i < nRows; ++i) {\n\t\t for (index k = 0; k < columnIdx[i].size(); ++k) {\n\t\t\t this->columnIdx[rowIdx[i] + k] = columnIdx[i][k];\n\t\t\t nonZeros[rowIdx[i] + k] = values[i][k];\n\t\t }\n\t }\n}\n\nCSRMatrix::CSRMatrix(const count nRows, const count nCols, const std::vector<index> &rowIdx, const std::vector<index> &columnIdx, const std::vector<double> &nonZeros, bool isSorted) : rowIdx(rowIdx), columnIdx(columnIdx), nonZeros(nonZeros), nRows(nRows), nCols(nCols), isSorted(isSorted) {\n}\n\ncount CSRMatrix::nnzInRow(const index i) const {\n\tassert(i >= 0 && i < nRows);\n\treturn rowIdx[i+1] - rowIdx[i];\n}\n\ncount CSRMatrix::nnz() const {\n\treturn nonZeros.size();\n}\n\ndouble CSRMatrix::operator()(const index i, const index j) const {\n\tassert(i >= 0 && i < nRows);\n\tassert(j >= 0 && j < nCols);\n\n\tdouble value = 0.0;\n\tfor (index k = rowIdx[i]; k < rowIdx[i+1]; ++k) {\n\t\tif (columnIdx[k] == j) {\n\t\t\tvalue = nonZeros[k];\n\t\t\tbreak;\n\t\t}\n\t}\n\n\treturn value;\n}\n\nvoid CSRMatrix::quicksort(index left, index right) {\n\tif (left >= right) return;\n\tindex pivotIdx = partition(left, right);\n\tif (pivotIdx != 0) {\n\t\tquicksort(left, pivotIdx-1);\n\t}\n\tquicksort(pivotIdx+1, right);\n}\n\nindex CSRMatrix::partition(index left, index right) {\n\tindex mid = (left + right) / 2;\n\tindex pivot = columnIdx[mid];\n\tstd::swap(columnIdx[mid], columnIdx[right]);\n\tstd::swap(nonZeros[mid], nonZeros[right]);\n\n\tindex i = left;\n\tfor (index j = left; j < right; ++j) {\n\t\tif (columnIdx[j] <= pivot) {\n\t\t\tstd::swap(columnIdx[i], columnIdx[j]);\n\t\t\tstd::swap(nonZeros[i], nonZeros[j]);\n\t\t\t++i;\n\t\t}\n\t}\n\tstd::swap(columnIdx[i], columnIdx[right]);\n\tstd::swap(nonZeros[i], nonZeros[right]);\n\treturn i;\n}\n\nvoid CSRMatrix::sort() {\n#pragma omp parallel for schedule(guided)\n\tfor (index i = 0; i < nRows; ++i) {\n\t\tif (rowIdx[i+1] - rowIdx[i] > 1) {\n\t\t\tquicksort(rowIdx[i], rowIdx[i+1]-1);\n\t\t}\n\t}\n\n\tisSorted = true;\n}\n\nbool CSRMatrix::sorted() const {\n#ifndef NDEBUG\n\tbool sorted = true;\n#pragma omp parallel for\n\tfor (index i = 0; i < nRows; ++i) {\n\t\tfor (index j = rowIdx[i]+1; j < rowIdx[i+1]; ++j) {\n\t\t\tif (columnIdx[j-1] > columnIdx[j]) {\n\t\t\t\tsorted = false;\n\t\t\t\tbreak;\n\t\t\t}\n\t\t}\n\t}\n\n\treturn sorted;\n#endif\n\n\treturn isSorted;\n}\n\nVector CSRMatrix::row(const index i) const {\n\tassert(i >= 0 && i < nRows);\n\n\tVector row(numberOfColumns(), 0.0, true);\n\tparallelForNonZeroElementsInRow(i, [&](index j, double value) {\n\t\trow[j] = value;\n\t});\n\n\treturn row;\n}\n\nVector CSRMatrix::column(const index j) const {\n\tassert(j >= 0 && j < nCols);\n\n\tVector column(numberOfRows());\n#pragma omp parallel for\n\tfor (node i = 0; i < numberOfRows(); ++i) {\n\t\tcolumn[i] = (*this)(i,j);\n\t}\n\n\treturn column;\n}\n\nVector CSRMatrix::diagonal() const {\n\tVector diag(std::min(nRows, nCols), 0.0);\n\n\tif (sorted()) {\n#pragma omp parallel for\n\t\tfor (index i = 0; i < diag.getDimension(); ++i) 
{\n\t\t\tif (rowIdx[i] == rowIdx[i+1]) continue; // no entry in row i\n\t\t\tindex left = rowIdx[i];\n\t\t\tindex right = rowIdx[i+1]-1;\n\t\t\tindex mid = (left + right) / 2;\n\t\t\twhile (left <= right) {\n\t\t\t\tif (columnIdx[mid] == i) {\n\t\t\t\t\tdiag[i] = nonZeros[mid];\n\t\t\t\t\tbreak;\n\t\t\t\t}\n\n\t\t\t\tif (columnIdx[mid] < i) {\n\t\t\t\t\tleft = mid+1;\n\t\t\t\t} else {\n\t\t\t\t\tright = mid-1;\n\t\t\t\t}\n\n\t\t\t\tmid = (left + right) / 2;\n\t\t\t}\n\t\t}\n\t} else {\n#pragma omp parallel for\n\t\tfor (index i = 0; i < diag.getDimension(); ++i) {\n\t\t\tdiag[i] = (*this)(i,i);\n\t\t}\n\t}\n\n\treturn diag;\n}\n\nCSRMatrix CSRMatrix::operator+(const CSRMatrix &other) const {\n\tassert(nRows == other.nRows && nCols == other.nCols);\n\treturn CSRMatrix::binaryOperator(*this, other, [](double val1, double val2) {return val1 + val2;});\n}\n\nCSRMatrix& CSRMatrix::operator+=(const CSRMatrix &other) {\n\tassert(nRows == other.nRows && nCols == other.nCols);\n\t*this = CSRMatrix::binaryOperator(*this, other, [](double val1, double val2) {return val1 + val2;});\n\treturn *this;\n}\n\nCSRMatrix CSRMatrix::operator-(const CSRMatrix &other) const {\n\tassert(nRows == other.nRows && nCols == other.nCols);\n\treturn CSRMatrix::binaryOperator(*this, other, [](double val1, double val2) {return val1 - val2;});\n}\n\nCSRMatrix& CSRMatrix::operator-=(const CSRMatrix &other) {\n\tassert(nRows == other.nRows && nCols == other.nCols);\n\t*this = CSRMatrix::binaryOperator(*this, other, [](double val1, double val2) {return val1 - val2;});\n\treturn *this;\n}\n\nCSRMatrix CSRMatrix::operator*(const double &scalar) const {\n\treturn CSRMatrix(*this) *= scalar;\n}\n\nCSRMatrix& CSRMatrix::operator*=(const double &scalar) {\n#pragma omp parallel for\n\tfor (index k = 0; k < nonZeros.size(); ++k) {\n\t\tnonZeros[k] *= scalar;\n\t}\n\n\treturn *this;\n}\n\nVector CSRMatrix::operator*(const Vector &vector) const {\n\tassert(!vector.isTransposed());\n\tassert(nCols == vector.getDimension());\n\n\tVector result(nRows, 0.0);\n#pragma omp parallel for\n\tfor (index i = 0; i < numberOfRows(); ++i) {\n\t\tdouble sum = 0.0;\n\t\tfor (index cIdx = rowIdx[i]; cIdx < rowIdx[i+1]; ++cIdx) {\n\t\t\tsum += nonZeros[cIdx] * vector[columnIdx[cIdx]];\n\t\t}\n\t\tresult[i] = sum;\n\t}\n\n\treturn result;\n}\n\nCSRMatrix CSRMatrix::operator*(const CSRMatrix &other) const {\n\tassert(nCols == other.nRows);\n\n\tstd::vector<index> rowIdx(numberOfRows()+1, 0);\n\tstd::vector<index> columnIdx;\n\tstd::vector<double> nonZeros;\n\n#pragma omp parallel\n\t{\n\t\tstd::vector<int64_t> marker(other.numberOfColumns(), -1);\n\t\tcount numThreads = omp_get_num_threads();\n\t\tindex threadId = omp_get_thread_num();\n\n\t\tcount chunkSize = (numberOfRows() + numThreads - 1) / numThreads;\n\t\tindex chunkStart = threadId * chunkSize;\n\t\tindex chunkEnd = std::min(numberOfRows(), chunkStart + chunkSize);\n\n\t\tfor (index i = chunkStart; i < chunkEnd; ++i) {\n\t\t\tfor (index jA = this->rowIdx[i]; jA < this->rowIdx[i+1]; ++jA) {\n\t\t\t\tindex k = this->columnIdx[jA];\n\t\t\t\tfor (index jB = other.rowIdx[k]; jB < other.rowIdx[k+1]; ++jB) {\n\t\t\t\t\tindex j = other.columnIdx[jB];\n\t\t\t\t\tif (marker[j] != (int64_t) i) {\n\t\t\t\t\t\tmarker[j] = i;\n\t\t\t\t\t\t++rowIdx[i+1];\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tstd::fill(marker.begin(), marker.end(), -1);\n\n#pragma omp barrier\n#pragma omp single\n\t\t{\n\t\t\tfor (index i = 0; i < numberOfRows(); ++i) {\n\t\t\t\trowIdx[i+1] += rowIdx[i];\n\t\t\t}\n\n\t\t\tcolumnIdx = 
std::vector<index>(rowIdx[numberOfRows()]);\n\t\t\tnonZeros = std::vector<double>(rowIdx[numberOfRows()]);\n\t\t}\n\n\t\tfor (index i = chunkStart; i < chunkEnd; ++i) {\n\t\t\tindex rowBegin = rowIdx[i];\n\t\t\tindex rowEnd = rowBegin;\n\n\t\t\tfor (index jA = this->rowIdx[i]; jA < this->rowIdx[i+1]; ++jA) {\n\t\t\t\tindex k = this->columnIdx[jA];\n\t\t\t\tdouble valA = this->nonZeros[jA];\n\n\t\t\t\tfor (index jB = other.rowIdx[k]; jB < other.rowIdx[k+1]; ++jB) {\n\t\t\t\t\tindex j = other.columnIdx[jB];\n\t\t\t\t\tdouble valB = other.nonZeros[jB];\n\n\t\t\t\t\tif (marker[j] < (int64_t) rowBegin) {\n\t\t\t\t\t\tmarker[j] = rowEnd;\n\t\t\t\t\t\tcolumnIdx[rowEnd] = j;\n\t\t\t\t\t\tnonZeros[rowEnd] = valA * valB;\n\t\t\t\t\t\t++rowEnd;\n\t\t\t\t\t} else {\n\t\t\t\t\t\tnonZeros[marker[j]] += valA * valB;\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\tCSRMatrix result(numberOfRows(), other.numberOfColumns(), rowIdx, columnIdx, nonZeros);\n\tresult.sort();\n\treturn result;\n\n//\tstd::vector<Triple> triples;\n//\n//\tSparseAccumulator spa(numberOfRows());\n//\tfor (index i = 0; i < numberOfRows(); ++i) {\n//\t\tforNonZeroElementsInRow(i, [&](index k, double val1) {\n//\t\t\tother.forNonZeroElementsInRow(k, [&](index j, double val2) {\n//\t\t\t\tspa.scatter(val1 * val2, j);\n//\t\t\t});\n//\t\t});\n//\n//\t\tspa.gather([&](index i, index j, double value){\n//\t\t\ttriples.push_back({i,j,value});\n//\t\t});\n//\n//\t\tspa.increaseRow();\n//\t}\n//\n//\treturn CSRMatrix(nRows, other.nCols, triples, true);\n\n}\n\nCSRMatrix CSRMatrix::operator/(const double &divisor) const {\n\treturn CSRMatrix(*this) /= divisor;\n}\n\nCSRMatrix& CSRMatrix::operator/=(const double &divisor) {\n\treturn *this *= 1.0 / divisor;\n}\n\nCSRMatrix CSRMatrix::subMatrix(const std::vector<index> &rows, const std::vector<index> &columns) const {\n\tindex invalid = std::numeric_limits<index>::max();\n\tstd::vector<index> columnMapping(numberOfColumns(), invalid);\n\tstd::vector<index> rowIdx(rows.size() + 1, 0);\n\n#pragma omp parallel for\n\tfor (index j = 0; j < columns.size(); ++j) {\n\t\tcolumnMapping[columns[j]] = j;\n\t}\n\n\n#pragma omp parallel for\n\tfor (index i = 0; i < rows.size(); ++i) {\n\t\tforNonZeroElementsInRow(rows[i], [&](index j, double val) {\n\t\t\tif (columnMapping[j] != invalid) {\n\t\t\t\trowIdx[i+1]++;\n\t\t\t}\n\t\t});\n\t}\n\n\tfor (index i = 0; i < rows.size(); ++i) {\n\t\trowIdx[i+1] += rowIdx[i];\n\t}\n\n\tcount nnz = rowIdx[rows.size()];\n\tstd::vector<index> columnIdx(nnz);\n\tstd::vector<double> nonZeros(nnz);\n\n#pragma omp parallel for\n\tfor (index i = 0; i < rows.size(); ++i) {\n\t\tindex cIdx = rowIdx[i];\n\t\tforNonZeroElementsInRow(rows[i], [&](index j, double val) {\n\t\t\tif (columnMapping[j] != invalid) { // column is present in submatrix\n\t\t\t\tcolumnIdx[cIdx] = columnMapping[j];\n\t\t\t\tnonZeros[cIdx] = val;\n\t\t\t\tcIdx++;\n\t\t\t}\n\t\t});\n\t}\n\n\treturn CSRMatrix(rows.size(), columns.size(), rowIdx, columnIdx, nonZeros, sorted());\n}\n\nCSRMatrix CSRMatrix::mTmMultiply(const CSRMatrix &A, const CSRMatrix &B) {\n\tassert(A.nRows == B.nRows);\n\n\tstd::vector<std::vector<index>> columnIdx(A.numberOfColumns());\n\tstd::vector<std::vector<double>> values(A.numberOfColumns());\n\n\tfor (index k = 0; k < A.numberOfRows(); ++k) {\n\t\tA.forNonZeroElementsInRow(k, [&](index i, double vA) {\n\t\t\tB.forNonZeroElementsInRow(k, [&](index j, double vB) {\n\t\t\t\tbool found = false;\n\t\t\t\tfor (index l = 0; l < columnIdx[i].size(); ++l) {\n\t\t\t\t\tif (columnIdx[i][l] == j) 
{\n\t\t\t\t\t\tvalues[i][l] += vA * vB;\n\t\t\t\t\t\tfound = true;\n\t\t\t\t\t\tbreak;\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\tif (!found) {\n\t\t\t\t\tcolumnIdx[i].push_back(j);\n\t\t\t\t\tvalues[i].push_back(vA * vB);\n\t\t\t\t}\n\t\t\t});\n\t\t});\n\t}\n\n\treturn CSRMatrix(A.nCols, B.nCols, columnIdx, values);\n}\n\nCSRMatrix CSRMatrix::mmTMultiply(const CSRMatrix &A, const CSRMatrix &B) {\n\tassert(A.nCols == B.nCols);\n\n\tstd::vector<std::vector<index>> columnIdx(A.numberOfRows());\n\tstd::vector<std::vector<double>> values(A.numberOfRows());\n\n\tfor (index i = 0; i < A.numberOfRows(); ++i) {\n\t\tA.forNonZeroElementsInRow(i, [&](index k, double vA) {\n\t\t\tfor (index j = 0; j < B.numberOfRows(); ++j) {\n\t\t\t\tdouble vB = B(j,k);\n\t\t\t\tif (vB != 0.0) {\n\t\t\t\t\tbool found = false;\n\t\t\t\t\tfor (index l = 0; l < columnIdx[i].size(); ++l) {\n\t\t\t\t\t\tif (columnIdx[i][l] == j) {\n\t\t\t\t\t\t\tvalues[i][l] += vA * vB;\n\t\t\t\t\t\t\tfound = true;\n\t\t\t\t\t\t\tbreak;\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\n\t\t\t\t\tif (!found) {\n\t\t\t\t\t\tcolumnIdx[i].push_back(j);\n\t\t\t\t\t\tvalues[i].push_back(vA * vB);\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t});\n\t}\n\n\treturn CSRMatrix(A.nRows, B.nRows, columnIdx, values);\n}\n\nVector CSRMatrix::mTvMultiply(const CSRMatrix &matrix, const Vector &vector) {\n\tassert(matrix.nRows == vector.getDimension() && !vector.isTransposed());\n\n\tVector result(matrix.numberOfColumns(), 0.0);\n\tfor (index k = 0; k < matrix.numberOfRows(); ++k) {\n\t\tmatrix.forNonZeroElementsInRow(k, [&](index j, double value) {\n\t\t\tresult[j] += value * vector[k];\n\t\t});\n\t}\n\n\treturn result;\n}\n\nCSRMatrix CSRMatrix::graphLaplacian(const Graph &graph) {\n\tstd::vector<std::pair<index, index>> positions;\n\tstd::vector<double> values;\n\n\tgraph.forNodes([&](const index i){\n\t\tdouble weightedDegree = 0.0;\n\n\t\tdouble selfLoopWeight = 0.0;\n\t\tgraph.forNeighborsOf(i, [&](const index j, double weight) { // - adjacency matrix\n\t\t\tif (j == i) {\n\t\t\t\tselfLoopWeight = weight;\n\t\t\t} else {\n\t\t\t\tpositions.push_back(std::make_pair(i,j));\n\t\t\t\tvalues.push_back(-weight);\n\t\t\t}\n\n\t\t\tweightedDegree += weight;\n\t\t});\n\n\t\tpositions.push_back(std::make_pair(i,i));\n\t\tvalues.push_back(weightedDegree - selfLoopWeight); // degree matrix\n\t});\n\n\treturn CSRMatrix(graph.upperNodeIdBound(), graph.upperNodeIdBound(), positions, values);\n}\n\nCSRMatrix CSRMatrix::adjacencyMatrix(const Graph &graph) {\n\tint nonZeros = graph.isDirected()? 
graph.numberOfEdges() : graph.numberOfEdges() * 2;\n\n\tstd::vector<std::pair<index, index>> positions(nonZeros);\n\tstd::vector<double> values(nonZeros);\n\n\tint index = 0;\n\tgraph.forEdges([&](node i, node j, double val) {\n\t\tpositions[index] = std::make_pair(i,j);\n\t\tvalues[index] = val;\n\t\tindex++;\n\t\tif (!graph.isDirected() && i != j) {\n\t\t\tpositions[index] = std::make_pair(j,i);\n\t\t\tvalues[index] = val;\n\t\t\tindex++;\n\t\t}\n\t});\n\n\treturn CSRMatrix(graph.numberOfNodes(), graph.numberOfNodes(), positions, values);\n}\n\nGraph CSRMatrix::laplacianToGraph(const CSRMatrix &laplacian) {\n\tassert(isLaplacian(laplacian));\n\tGraph G(std::max(laplacian.numberOfRows(), laplacian.numberOfColumns()), true, false);\n\tlaplacian.forNonZeroElementsInRowOrder([&](node u, node v, edgeweight weight) {\n\t\tif (u != v) { // exclude diagonal\n\t\t\tif (u < v) {\n\t\t\t\tG.addEdge(u, v, -weight);\n\t\t\t}\n\t\t}\n\t});\n\n\treturn G;\n}\n\nGraph CSRMatrix::matrixToGraph(const CSRMatrix &matrix) {\n\tbool directed = !isSymmetric(matrix);\n\tGraph G(std::max(matrix.numberOfRows(), matrix.numberOfColumns()), true, directed);\n\tmatrix.forNonZeroElementsInRowOrder([&](node u, node v, edgeweight weight) {\n\t\tif (directed || u <= v) {\n\t\t\tG.addEdge(u, v, weight);\n\t\t}\n\t});\n\n\treturn G;\n}\n\nbool CSRMatrix::isSymmetric(const CSRMatrix &matrix) {\n\tbool output = true;\n\tmatrix.forNonZeroElementsInRowOrder([&] (index i, index j, edgeweight w) {\n\t\tif (abs(matrix(j, i)-w) > EPSILON) {\n\t\t\toutput = false;\n\t\t}\n\t});\n\tif (!output) INFO(\"not symmetric!\");\n\treturn output;\n}\n\nbool CSRMatrix::isSDD(const CSRMatrix &matrix) {\n\tif (!isSymmetric(matrix)) {\n\t\treturn false;\n\t}\n\n\t/* Criterion: a_ii >= \\sum_{j != i} a_ij */\n\tstd::vector<double> row_sum(matrix.numberOfRows());\n\tmatrix.parallelForNonZeroElementsInRowOrder([&] (node i, node j, double value) {\n\t\tif (i == j) {\n\t\t\trow_sum[i] += value;\n\t\t} else {\n\t\t\trow_sum[i] -= abs(value);\n\t\t}\n\t});\n\n\treturn std::all_of(row_sum.begin(), row_sum.end(), [] (double val) {return val > -EPSILON;});\n}\n\nbool CSRMatrix::isLaplacian(const CSRMatrix &matrix) {\n\tif (!isSymmetric(matrix)) {\n\t\treturn false;\n\t}\n\n\t/* Criterion: \\forall_i \\sum_j A_ij = 0 */\n\tstd::vector<double> row_sum(matrix.numberOfRows());\n\tstd::atomic<bool> right_sign(true);\n\tmatrix.parallelForNonZeroElementsInRowOrder([&] (node i, node j, double value) {\n\t\tif (i != j && value > EPSILON) {\n\t\t\tright_sign = false;\n\t\t}\n\t\trow_sum[i] += value;\n\t});\n\n\treturn right_sign && std::all_of(row_sum.begin(), row_sum.end(), [] (double val) {return abs(val) < EPSILON;});\n}\n\nCSRMatrix CSRMatrix::transpose() const {\n\tstd::vector<index> rowIdx(numberOfColumns()+1);\n\tfor (index i = 0; i < nnz(); ++i) {\n\t\t++rowIdx[columnIdx[i]+1];\n\t}\n\n\tfor (index i = 0; i < numberOfColumns(); ++i) {\n\t\trowIdx[i+1] += rowIdx[i];\n\t}\n\n\tstd::vector<index> columnIdx(rowIdx[numberOfColumns()]);\n\tstd::vector<double> nonZeros(rowIdx[numberOfColumns()]);\n\n\tfor (index i = 0; i < numberOfRows(); ++i) {\n\t\tfor (index j = this->rowIdx[i]; j < this->rowIdx[i+1]; ++j) {\n\t\t\tindex colIdx = this->columnIdx[j];\n\t\t\tcolumnIdx[rowIdx[colIdx]] = i;\n\t\t\tnonZeros[rowIdx[colIdx]] = this->nonZeros[j];\n\t\t\t++rowIdx[colIdx];\n\t\t}\n\t}\n\tindex shift = 0;\n\tfor (index i = 0; i < numberOfColumns(); ++i) {\n\t\tindex temp = rowIdx[i];\n\t\trowIdx[i] = shift;\n\t\tshift = temp;\n\t}\n\trowIdx[numberOfColumns()] = 
nonZeros.size();\n\n\treturn CSRMatrix(nCols, nRows, rowIdx, columnIdx, nonZeros);\n}\n\n\n\n} /* namespace NetworKit */\n" }, { "alpha_fraction": 0.5773633122444153, "alphanum_fraction": 0.6362865567207336, "avg_line_length": 21.046728134155273, "blob_id": "6493d9d9d844a2735b09450ec9d87991ffe4b753", "content_id": "d403421f14a20a64516c4e1319c0dd6eb9cd20cc", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 2359, "license_type": "permissive", "max_line_length": 82, "num_lines": 107, "path": "/networkit/cpp/flow/test/EdmondsKarpGTest.cpp", "repo_name": "networkitproject/networkit-mirror", "src_encoding": "UTF-8", "text": "/*\n* EdmondsKarpGTest.cpp\n *\n * Created on: 13.06.2014\n * Author: Michael Wegner ([email protected])\n */\n\n#include \"EdmondsKarpGTest.h\"\n\nnamespace NetworKit {\n\nTEST_F(EdmondsKarpGTest, testEdmondsKarpP1) {\n\tGraph G(7, false);\n\tG.addEdge(0,1);\n\tG.addEdge(0,2);\n\tG.addEdge(0,3);\n\tG.addEdge(1,2);\n\tG.addEdge(1,4);\n\tG.addEdge(2,3);\n\tG.addEdge(2,4);\n\tG.addEdge(3,4);\n\tG.addEdge(3,5);\n\tG.addEdge(4,6);\n\tG.addEdge(5,6);\n\n\tG.indexEdges();\n\n\tEdmondsKarp edKa(G, 0, 6);\n\tedKa.run();\n\tEXPECT_EQ(2, edKa.getMaxFlow()) << \"max flow is not correct\";\n\n\tEXPECT_EQ(1, edKa.getFlow(4, 6));\n\tEXPECT_EQ(1, edKa.getFlow(5, 6));\n\n\tstd::vector<node> sourceSet(edKa.getSourceSet());\n\n\tEXPECT_TRUE(std::find(sourceSet.begin(), sourceSet.end(), 0) != sourceSet.end());\n\tEXPECT_TRUE(std::find(sourceSet.begin(), sourceSet.end(), 1) != sourceSet.end());\n\tEXPECT_TRUE(std::find(sourceSet.begin(), sourceSet.end(), 2) != sourceSet.end());\n\tEXPECT_TRUE(std::find(sourceSet.begin(), sourceSet.end(), 3) != sourceSet.end());\n\tEXPECT_TRUE(std::find(sourceSet.begin(), sourceSet.end(), 4) != sourceSet.end());\n\n\tEXPECT_TRUE(std::find(sourceSet.begin(), sourceSet.end(), 5) == sourceSet.end());\n\tEXPECT_TRUE(std::find(sourceSet.begin(), sourceSet.end(), 6) == sourceSet.end());\n}\n\nTEST_F(EdmondsKarpGTest, testEdmondsKarpTwoPaths) {\n\tGraph G(11);\n\n\tG.addEdge(0, 1);\n\tG.addEdge(0, 2);\n\tG.addEdge(2, 3);\n\tG.addEdge(3, 4);\n\tG.addEdge(1, 4);\n\tG.addEdge(1, 5);\n\tG.addEdge(5, 6);\n\tG.addEdge(6, 7);\n\tG.addEdge(7, 8);\n\tG.addEdge(8, 9);\n\tG.addEdge(4, 10);\n\tG.addEdge(9, 10);\n\n\tG.indexEdges();\n\n\tEdmondsKarp edKa(G, 0, 10);\n\tedKa.run();\n\n\tEXPECT_EQ(2, edKa.getMaxFlow());\n\tEXPECT_EQ(0, edKa.getFlow(1, 4));\n}\n\nTEST_F(EdmondsKarpGTest, testEdmondsKarpP2) {\n\tGraph G(6, true);\n\tG.addEdge(0,1, 5);\n\tG.addEdge(0,2, 15);\n\tG.addEdge(1,3, 5);\n\tG.addEdge(1,4, 5);\n\tG.addEdge(2,3, 5);\n\tG.addEdge(2, 4, 5);\n\tG.addEdge(3,5, 15);\n\tG.addEdge(4,5, 5);\n\n\tG.indexEdges();\n\n\tEdmondsKarp edKa(G, 0, 5);\n\tedKa.run();\n\n\tEXPECT_EQ(15, edKa.getMaxFlow()) << \"max flow is not correct\";\n}\n\nTEST_F(EdmondsKarpGTest, testEdmondsKarpUnconnected) {\n\tGraph G(6, true);\n\tG.addEdge(0,1, 5);\n\tG.addEdge(0,2, 15);\n\tG.addEdge(1,2, 5);\n\tG.addEdge(3, 4, 5);\n\tG.addEdge(3,5, 15);\n\tG.addEdge(4,5, 5);\n\n\tG.indexEdges();\n\n\tEdmondsKarp edKa(G, 0, 5);\n\tedKa.run();\n\tEXPECT_EQ(0, edKa.getMaxFlow()) << \"max flow is not correct\";\n}\n\n} /* namespace NetworKit */\n" }, { "alpha_fraction": 0.6377629637718201, "alphanum_fraction": 0.6459723114967346, "avg_line_length": 20.655555725097656, "blob_id": "d901a19d107ab2a9b1cc7340b107ae4edcc13890", "content_id": "d8197d77d79e95ac172381d91a927d703507f087", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, 
"language": "C++", "length_bytes": 1949, "license_type": "permissive", "max_line_length": 74, "num_lines": 90, "path": "/networkit/cpp/spanning/PseudoRandomSpanningTree.cpp", "repo_name": "networkitproject/networkit-mirror", "src_encoding": "UTF-8", "text": "/*\n * PseudoRandomSpanningTree.cpp\n *\n * Created on: 20.06.2015\n * Author: Henning\n */\n\n#include \"PseudoRandomSpanningTree.h\"\n#include \"../structures/UnionFind.h\"\n#include \"../auxiliary/Random.h\"\n\nnamespace NetworKit {\n\nPseudoRandomSpanningTree::PseudoRandomSpanningTree(const Graph& G): g(G) {\n\n}\n\nvoid PseudoRandomSpanningTree::run() {\n\tcount n = g.numberOfNodes();\n\tGraph randTree(n);\n\tUnionFind uf(n);\n\n\t// sort edges in decreasing weight order\n\tstd::vector<MyEdge> sortedEdges;\n\tg.forEdges([&](node u, node v, edgeweight ew) {\n\t\tdouble randVal = 1e-6 * (1.0 - 2.0 * Aux::Random::probability());\n\n\t\tMyEdge myEdge;\n\t\tmyEdge.from = u;\n\t\tmyEdge.to = v;\n\t\tmyEdge.weight = ew + randVal;\n\t\tsortedEdges.push_back(myEdge);\n\t});\n\tstd::sort(sortedEdges.begin(), sortedEdges.end());\n\n\t// process in decreasing weight order\n\tfor (auto e: sortedEdges) {\n\t\tnode u = e.from;\n\t\tnode v = e.to;\n//\t\tINFO(\"process edge (\", u, \", \", v, \") with weight \", e.weight);\n\n\t\t// if edge does not close cycle, add it to tree\n\t\tif (uf.find(u) != uf.find(v)) {\n\t\t\trandTree.addEdge(u, v);\n\t\t\tuf.merge(u, v);\n\t\t}\n\t}\n\n\ttree = randTree;\n}\n\nvoid PseudoRandomSpanningTree::runShuffle() {\n\n\t// TODO: handle disconnected graphs\n\n\tcount n = g.numberOfNodes();\n\tGraph randTree(n);\n\tUnionFind uf(n);\n\n\t// prepare array to be shuffled\n\tstd::vector<std::pair<node, node> > multEdges;\n\tg.forEdges([&](node u, node v) {\n\t\tcount mult = 1; // g.degree(u) + g.degree(v);\n\t\tfor (index i = 0; i < mult; ++i) {\n\t\t\tmultEdges.push_back(std::make_pair(u, v));\n\t\t}\n\t});\n\n\n\t// shuffle edges to get their processing order\n\tstd::random_shuffle(multEdges.begin(), multEdges.end());\n\tfor (auto e: multEdges) {\n\t\tnode u = e.first;\n\t\tnode v = e.second;\n\n\t\t// if edge does not close cycle, add it to tree\n\t\tif (uf.find(u) != uf.find(v)) {\n\t\t\trandTree.addEdge(u, v);\n\t\t\tuf.merge(u, v);\n\t\t}\n\t}\n\n\ttree = randTree;\n}\n\nGraph PseudoRandomSpanningTree::getTree() {\n\treturn tree;\n}\n\n} /* namespace NetworKit */\n" }, { "alpha_fraction": 0.6678445339202881, "alphanum_fraction": 0.6837455630302429, "avg_line_length": 24.727272033691406, "blob_id": "dc8c48f6cd5ea2b621c80dea036c2e4e2fd66688", "content_id": "05b60293a530c2208dbde336f31db72be0b0c6be", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 566, "license_type": "permissive", "max_line_length": 166, "num_lines": 22, "path": "/networkit/cpp/algebraic/DiagonalMatrix.cpp", "repo_name": "networkitproject/networkit-mirror", "src_encoding": "UTF-8", "text": "/*\n * DiagonalMatrix.cpp\n *\n * Created on: 13.11.2014\n * Author: Michael Wegner ([email protected])\n */\n\n#include \"DiagonalMatrix.h\"\n\nnamespace NetworKit {\n\nDiagonalMatrix::DiagonalMatrix(const count dimension, const std::vector<double> &values) : Matrix(dimension) {\n\tif (values.size() != dimension) {\n\t\tthrow std::runtime_error(\"DiagonalMatrix::DiagonalMatrix(count dimension, std::vector<double> values): dimension of values does not match the specified dimension\");\n\t}\n\n\tfor (index i = 0; i < dimension; ++i) {\n\t\tsetValue(i, i, values[i]);\n\t}\n}\n\n} /* namespace 
NetworKit */\n" }, { "alpha_fraction": 0.6297376155853271, "alphanum_fraction": 0.6530612111091614, "avg_line_length": 11.703703880310059, "blob_id": "febe36ea16de65328fed399648226d0363a5993e", "content_id": "d1fdbbaa5de0ea988ef4fb7ff3a7ab73d03f3809", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 343, "license_type": "permissive", "max_line_length": 44, "num_lines": 27, "path": "/networkit/cpp/structures/test/PartitionGTest.h", "repo_name": "networkitproject/networkit-mirror", "src_encoding": "UTF-8", "text": "/*\n * PartitionGTest.h\n *\n * Created on: 04.12.2013\n * Author: Maximilian Vogel ([email protected])\n */\n\n#ifndef NOGTEST\n\n#ifndef PARTITIONGTEST_H_\n#define PARTITIONGTEST_H_\n\n#include <gtest/gtest.h>\n\nnamespace NetworKit {\n\nclass PartitionGTest: public testing::Test {\n\n};\n\n\n\n\n} /* namespace NetworKit */\n#endif /* PARTITIONGTEST_H_ */\n\n#endif /*NOGTEST */\n" }, { "alpha_fraction": 0.481850802898407, "alphanum_fraction": 0.5003988742828369, "avg_line_length": 27.16853904724121, "blob_id": "eca548e0793c3371b08bf6613264125be4862a29", "content_id": "226420460401d5ee34d7c022ebefa47f95e01a1d", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 5014, "license_type": "permissive", "max_line_length": 109, "num_lines": 178, "path": "/networkit/cpp/viz/MaxentStress.cpp", "repo_name": "networkitproject/networkit-mirror", "src_encoding": "UTF-8", "text": "/*\n * MaxentStress.cpp\n *\n * Created on: 22.01.2014\n * Author: Henning\n */\n\n#include \"MaxentStress.h\"\n#include \"../auxiliary/Log.h\"\n\nnamespace NetworKit {\n\nMaxentStress::MaxentStress(Point<float> bottomLeft, Point<float> topRight, bool useGivenLayout):\n\t\t\t\tLayouter(bottomLeft, topRight, useGivenLayout)\n{\n\n}\n\n\nvoid MaxentStress::draw(Graph& G) {\n\tcount n = G.numberOfNodes();\n\tinitialize(G);\n\n\t//////////////////////////////////////////////////////////\n\t// Force calculations\n\t//////////////////////////////////////////////////////////\n\n\n\n\t//////////////////////////////////////////////////////////\n\t// Move vertices according to forces\n\t//////////////////////////////////////////////////////////\n\tauto move([&](Point<float>& p, Point<float>& force, float step) {\n\t\t// x_i := x_i + step * (f / ||f||)\n\t\tfloat len = force.length();\n\t\tif (len > 0) {\n\t\t\tp += force.scale(step / len);\n\t\t}\n\n\t\t// position inside frame\n\t\tp[0] = fmax(p[0], 0.0);\n\t\tp[1] = fmax(p[1], 0.0);\n\t\tp[0] = fmin(p[0], 1.0);\n\t\tp[1] = fmin(p[1], 1.0);\n\t});\n\n\n\t//////////////////////////////////////////////////////////\n\t// Cooling schedule\n\t//////////////////////////////////////////////////////////\n\n\n\t//////////////////////////////////////////////////////////\n\t// Check convergence\n\t//////////////////////////////////////////////////////////\n\tauto isConverged([&](std::vector<Point<float> >& oldLayout,\n\t\t\tstd::vector<Point<float> >& newLayout) {\n\t\tfloat change = 0.0;\n\n\t\tfor (index i = 0; i < oldLayout.size(); ++i) {\n\t\t\tchange += oldLayout[i].distance(newLayout[i]); // could be accelerated by squared distance\n\t\t}\n\t\tDEBUG(\"change: \", change);\n\n\t\treturn (change < 0.1); // FIXME: externalize\n\t});\n\n\n\n\t//////////////////////////////////////////////////////////\n\t// Preparations for main loop\n\t//////////////////////////////////////////////////////////\n\tbool converged = false;\n\tstd::vector<float> origin = {0.0, 
0.0};\n\tstd::vector<Point<float> > forces(n, origin);\n\tcount iter = 0;\n\n\t//////////////////////////////////////////////////////////\n\t// Main loop\n\t//////////////////////////////////////////////////////////\n\n\t// Paper: q = 0 for many graphs, q = 0.8 for graphs with many degree-1 nodes\n\t// alpha: initially 1, then in each iteration alpha := 0.3 * alpha\n\tfloat q = 0;\n\tfloat alpha = 1.0;\n\tAlgebraicDistanceIndex algdist(G, 5, 10);\n\talgdist.preprocess();\n\n\twhile (! converged) {\n\t\tstd::vector<Point<float> > previousLayout = layout;\n\n\t\t// init for current iteration\n\t\tG.parallelForNodes([&](node u) {\n\t\t\tforces[u] = origin;\n\t\t});\n\n\t\t// apply forces to each node\n\t\tG.forNodes([&](node u) {\n\t\t\t// FIXME: take care of singletons...\n\t\t\tassert(G.weightedDegree(u) > 0.0);\n\n\t\t\tPoint<float> uPoint = layout[u];\n\t\t\tPoint<float> attractiveForce(0.0, 0.0);\n\t\t\tPoint<float> repulsiveForce(0.0, 0.0);\n\t\t\tfloat distSum = 0.0;\n\t\t\tDEBUG(\"node \", u, \"; #neighbors: \", G.degree(u));\n\n\t\t\tG.forNodes([&](node v) {\n\t\t\t\tif (u < v) { // only unordered pairs\n\t\t\t\t\tPoint<float> vPoint = layout[v];\n\t\t\t\t\tfloat diffX = uPoint[0] - vPoint[0];\n\t\t\t\t\tfloat diffY = uPoint[1] - vPoint[1];\n\t\t\t\t\tPoint<float> diffVec(diffX, diffY);\n\t\t\t\t\tfloat len = diffVec.length();\n\t\t\t\t\tDEBUG(\"|diff| \", u, \" - \", v, \": \", len);\n\n\t\t\t\t\tif (len > 0.0) {\n\t\t\t\t\t\tif (G.hasEdge(u, v)) {\n\t\t\t\t\t\t\t// sum over all node pairs in S\n\t\t\t\t\t\t\t// $\\frac{1}{\\rho_i} \\sum_{i,j \\in S} w_{ij} * (x_j + d_{ij} \\frac{x_i - x_j}{\\Vert x_i - x_j \\Vert})$\n\t\t\t\t\t\t\t\tfloat dist = 1.0; // algdist.distance(u, v);\n\t\t\t\t\t\t\t\tdistSum += dist;\n\t\t\t\t\t\t\t\tDEBUG(\"algdist \", u, \" - \", v, \": \", dist);\n\t\t\t\t\t\t\t\tdiffVec.scale(dist / len);\n\t\t\t\t\t\t\t\tdiffVec += vPoint;\n\t\t\t\t\t\t\t\tattractiveForce += diffVec.scale(1.0 / (dist * dist));\n\t\t\t\t\t\t}\n\t\t\t\t\t\telse {\n\t\t\t\t\t\t\t// traverse remaining vertices not in neighborhood for repulsive forces\n\t\t\t\t\t\t\t// sum over all node pairs not in S\n\t\t\t\t\t\t\t// $\\frac{\\alpha}{\\rho_i} \\sum_{i,j \\notin S} w_{ij} \\frac{x_i - x_j}{\\Vert x_i - x_j \\Vert^{q+2}}$\n\t\t\t\t\t\t\t// TODO: approximation (e.g. 
Barnes-Hut)\n\n\t\t\t\t\t\t\tfloat denom = 1.0 / pow(len, q+2);\n\t\t\t\t\t\t\tdiffVec.scale(denom);\n\t\t\t\t\t\t\trepulsiveForce += diffVec;\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t});\n\n\t\t\t// apply forces to node u\n\t\t\tstd::cout.flush();\n\t\t\tassert(distSum != 0.0);\n\t\t\tfloat rhoInv = 1.0 / distSum;\n\t\t\tattractiveForce.scale(rhoInv);\n\t\t\trepulsiveForce.scale(alpha * rhoInv);\n\n\t\t\tforces[u] += attractiveForce;\n\t\t\tforces[u] += repulsiveForce;\n\n\t\t\tif (alpha > 0.008) {\n\t\t\t\talpha = 0.3 * alpha;\n\t\t\t}\n\t\t});\n\n\n\t\t// move nodes\n\t\tG.parallelForNodes([&](node u) {\n\t\t\tmove(layout[u], forces[u], 1.0); // FIXME: step length\n\n\t\t\tDEBUG(\"moved \", u, \" by: \", forces[u][0], \" and \", forces[u][1]);\n\t\t\tDEBUG(\"old pos of \", u, \": \", previousLayout[u].toString(), \", new pos: \", layout[u].toString());\n\t\t});\n\n\t\t++iter;\n\t\tconverged = isConverged(previousLayout, layout) || iter >= 1000; // FIXME: externalize\n\t\tDEBUG(\"iteration finished: \", iter, \"; converged: \", converged);\n\t}\n\n\t// copy layout into graph\n\tG.parallelForNodes([&](node u) {\n\t\tG.setCoordinate(u, layout[u]);\n\t\tDEBUG(\"coordinate of \", u, \": \", layout[u].toString());\n\t});\n}\n\n} /* namespace NetworKit */\n" }, { "alpha_fraction": 0.5883738994598389, "alphanum_fraction": 0.6194030046463013, "avg_line_length": 25.33793067932129, "blob_id": "4103fb5a309d8ece9b687d151261c56a68ef6948", "content_id": "67ee5af36b942a7e7541d555c40832c3a3f0ed88", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 7638, "license_type": "permissive", "max_line_length": 120, "num_lines": 290, "path": "/networkit/cpp/graph/test/DynSSSPGTest.cpp", "repo_name": "networkitproject/networkit-mirror", "src_encoding": "UTF-8", "text": "/*\n * dynSSSPGTest.cpp\n *\n * Created on: 21.07.2014\n * Author: ebergamini\n */\n\n#include \"DynSSSPGTest.h\"\n#include \"../DynBFS.h\"\n#include \"../BFS.h\"\n#include \"../DynDijkstra.h\"\n#include \"../Dijkstra.h\"\n#include \"../../io/METISGraphReader.h\"\n#include \"../../auxiliary/Log.h\"\n#include \"../../generators/DorogovtsevMendesGenerator.h\"\n#include \"../../graph/Sampling.h\"\n#include <random>\n\n\nnamespace NetworKit {\n\nTEST_F(DynSSSPGTest, testDynamicBFS_1edge) {\n/* Graph:\n 0 3 6\n \\ / \\ /\n 2 5\n / \\ / \\\n 1 4 7\n */\n\tcount n = 8;\n\tGraph G(n);\n\n\tG.addEdge(0, 2);\n\tG.addEdge(1, 2);\n\tG.addEdge(2, 3);\n\tG.addEdge(2, 4);\n\tG.addEdge(3, 5);\n\tG.addEdge(4, 5);\n\tG.addEdge(5, 6);\n\tG.addEdge(5, 7);\n\n\tBFS bfs(G, 0);\n\tbfs.run();\n\tDynBFS dbfs(G, 0);\n\tdbfs.run();\n\tstd::vector<GraphEvent> batch(1);\n\tbatch[0].type = GraphEvent::EDGE_ADDITION;\n\tbatch[0].u = 0;\n\tbatch[0].v = 6;\n\tbatch[0].w = 1.0;\n\tfor (GraphEvent edge : batch) {\n\t\tG.addEdge(edge.u, edge.v, edge.w);\n\t}\n\tdbfs.update(batch);\n\tbfs.run();\n\tG.forNodes([&] (node i) {\n\t\tEXPECT_EQ(bfs.distance(i), dbfs.distance(i));\n\t\tEXPECT_EQ(bfs.numberOfPaths(i), dbfs.numberOfPaths(i));\n\t});\n}\n\nTEST_F(DynSSSPGTest, testDynamicBFS_batch) {\n/* Graph:\n\t\t0 3 6\n\t\t\\ / \\ /\n\t\t\t2 5\n\t\t/ \\ / \\\n\t\t1 4 7\n*/\n\tcount n = 8;\n\tGraph G(n);\n\n\tG.addEdge(0, 2);\n\tG.addEdge(1, 2);\n\tG.addEdge(2, 3);\n\tG.addEdge(2, 4);\n\tG.addEdge(3, 5);\n\tG.addEdge(4, 5);\n\tG.addEdge(5, 6);\n\tG.addEdge(5, 7);\n\n\tBFS bfs(G, 0);\n\tbfs.run();\n\tDynBFS dbfs(G, 0);\n\tdbfs.run();\n\tstd::vector<GraphEvent> batch(3);\n\tbatch[0].type = GraphEvent::EDGE_ADDITION;\n\tbatch[0].u 
= 3;\n\tbatch[0].v = 7;\n\tbatch[0].w = 1.0;\n\tbatch[1].type = GraphEvent::EDGE_ADDITION;\n\tbatch[1].u = 0;\n\tbatch[1].v = 5;\n\tbatch[1].w = 1.0;\n\tbatch[2].type = GraphEvent::EDGE_ADDITION;\n\tbatch[2].u = 2;\n\tbatch[2].v = 7;\n\tbatch[2].w = 1.0;\n\tfor (GraphEvent edge : batch) {\n\t\tG.addEdge(edge.u, edge.v, edge.w);\n\t}\n\tdbfs.update(batch);\n\tbfs.run();\n\tG.forNodes([&] (node i) {\n\t\tEXPECT_EQ(bfs.distance(i), dbfs.distance(i));\n\t\tEXPECT_EQ(bfs.numberOfPaths(i), dbfs.numberOfPaths(i));\n\t});\n\n}\n\n\nTEST_F(DynSSSPGTest, testDynamicDijkstra) {\n /* Graph:\n 0 3 6\n \\ / \\ /\n 2 -- 5\n / \\ / \\\n 1 4 7\n\n Edges in the upper row have weight 3,\n the edge in the middle row has weight 1.5,\n edges in the lower row have weight 2.\n */\n\tcount n = 8;\n\tGraph G(n, true);\n\n\tG.addEdge(0, 2, 3);\n\tG.addEdge(1, 2, 2);\n\tG.addEdge(2, 3, 3);\n\tG.addEdge(2, 4, 2);\n\tG.addEdge(2, 5, 1.5);\n\tG.addEdge(3, 5, 3);\n\tG.addEdge(4, 5, 2);\n\tG.addEdge(5, 6, 3);\n\tG.addEdge(5, 7, 2);\n\n\tDijkstra dij(G, 0);\n\tdij.run();\n\tDynDijkstra ddij(G, 0);\n\tddij.run();\n\tstd::vector<GraphEvent> batch(3);\n\tbatch[0].type = GraphEvent::EDGE_ADDITION;\n\tbatch[0].u = 0;\n\tbatch[0].v = 4;\n\tbatch[0].w = 1.0;\n\tbatch[1].type = GraphEvent::EDGE_ADDITION;\n\tbatch[1].u = 1;\n\tbatch[1].v = 4;\n\tbatch[1].w = 1.0;\n\tbatch[2].type = GraphEvent::EDGE_ADDITION;\n\tbatch[2].u = 6;\n\tbatch[2].v = 7;\n\tbatch[2].w = 3.0;\n\tfor (GraphEvent edge : batch) {\n\t\tG.addEdge(edge.u, edge.v, edge.w);\n\t}\n\tddij.update(batch);\n\tdij.run();\n\tG.forNodes([&] (node i) {\n\t\tEXPECT_EQ(dij.distance(i), ddij.distance(i));\n\t\tEXPECT_EQ(dij.numberOfPaths(i), ddij.numberOfPaths(i));\n\t});\n\n}\n\nTEST_F(DynSSSPGTest, testDynamicBFSGeneratedGraph) {\n\tMETISGraphReader reader;\n\tDorogovtsevMendesGenerator generator(1000);\n\tGraph G = generator.generate();\n\tDEBUG(\"Generated graph of dimension \", G.upperNodeIdBound());\n\tDynBFS dyn_bfs(G, 0);\n\tBFS bfs(G, 0);\n\tdyn_bfs.run();\n\tbfs.run();\n\tDEBUG(\"Before the edge insertion: \");\n\tcount nInsertions = 1000, i = 0;\n\twhile (i < nInsertions) {\n\t\tDEBUG(\"Sampling a new edge\");\n\t\tnode v1 = Sampling::randomNode(G);\n\t\tnode v2 = Sampling::randomNode(G);\n\t\tif (v1 != v2 && !G.hasEdge(v1, v2)) {\n\t\t\ti++;\n\t\t\tDEBUG(\"Adding edge number \", i);\n\t\t\tG.addEdge(v1, v2);\n\t\t\tstd::vector<GraphEvent> batch;\n\t\t\tbatch.push_back(GraphEvent(GraphEvent::EDGE_ADDITION, v1, v2, 1.0));\n\t\t\tDEBUG(\"Running update with dynamic bfs\");\n\t\t\tdyn_bfs.update(batch);\n\t\t\tDEBUG(\"Running from scratch with bfs\");\n\t\t\tbfs.run();\n\t\t\tG.forNodes([&] (node i) {\n\t\t\t//\tstd::cout<<\"Node \"<<i<<\":\"<<std::endl;\n\t\t\t//\tstd::cout<<\"Actual distance: \"<<dij.distance(i)<<\", computed distance: \"<<ddij.distance(i)<<std::endl;\n\t\t\t//\tstd::cout<<\"Actual number of paths: \"<<dij.numberOfPaths(i)<<\", computed one: \"<<ddij.numberOfPaths(i)<<std::endl;\n\t\t\t\tEXPECT_EQ(dyn_bfs.distance(i), bfs.distance(i));\n\t\t\t\tEXPECT_EQ(dyn_bfs.numberOfPaths(i), bfs.numberOfPaths(i));\n\t\t\t});\n\t\t}\n\t}\n}\n\nTEST_F(DynSSSPGTest, testDynamicDijkstraGeneratedGraph) {\n\tMETISGraphReader reader;\n\tDorogovtsevMendesGenerator generator(1000);\n\tGraph G1 = generator.generate();\n\tGraph G(G1, true, false);\n\tDEBUG(\"Generated graph of dimension \", G.upperNodeIdBound());\n\tDynDijkstra dyn_dij(G, 0);\n\tDijkstra dij(G, 0);\n\tdyn_dij.run();\n\tdij.run();\n\tDEBUG(\"Before the edge insertion: \");\n\tcount nInsertions = 10, i = 
0;\n\twhile (i < nInsertions) {\n\t\tDEBUG(\"Sampling a new edge\");\n\t\tnode v1 = Sampling::randomNode(G);\n\t\tnode v2 = Sampling::randomNode(G);\n\t\tif (v1 != v2 && !G.hasEdge(v1, v2)) {\n\t\t\ti++;\n\t\t\tDEBUG(\"Adding edge number \", i);\n\t\t\tG.addEdge(v1, v2);\n\t\t\tstd::vector<GraphEvent> batch;\n\t\t\tbatch.push_back(GraphEvent(GraphEvent::EDGE_ADDITION, v1, v2, 1.0));\n\t\t\tDEBUG(\"Running update with dynamic dijkstra\");\n\t\t\tdyn_dij.update(batch);\n\t\t\tDEBUG(\"Running from scratch with dijkstra\");\n\t\t\tdij.run();\n\t\t\tG.forNodes([&] (node i) {\n\t\t\t//\tstd::cout<<\"Node \"<<i<<\":\"<<std::endl;\n\t\t\t//\tstd::cout<<\"Actual distance: \"<<dij.distance(i)<<\", computed distance: \"<<ddij.distance(i)<<std::endl;\n\t\t\t//\tstd::cout<<\"Actual number of paths: \"<<dij.numberOfPaths(i)<<\", computed one: \"<<ddij.numberOfPaths(i)<<std::endl;\n\t\t\t\tEXPECT_EQ(dyn_dij.distance(i), dij.distance(i));\n\t\t\t\tEXPECT_EQ(dyn_dij.numberOfPaths(i), dij.numberOfPaths(i));\n\t\t\t});\n\t\t}\n\t}\n}\n\nTEST_F(DynSSSPGTest, testDynamicDijkstraBatches) {\n\tMETISGraphReader reader;\n\tstd::default_random_engine random_generator;\n \tstd::normal_distribution<double> distribution(100,10);\n\tDorogovtsevMendesGenerator generator(100);\n\tGraph G1 = generator.generate();\n\tGraph G(G1, true, false);\n\tDEBUG(\"Generated graph of dimension \", G.upperNodeIdBound());\n\t// add random normal weights to G\n\n\tG.forNodes([&] (node source) {\n\t\tDynDijkstra dyn_dij(G, source, true);\n\t\tDijkstra dij(G, source);\n\t\tdyn_dij.run();\n\t\tdij.run();\n\t\tDEBUG(\"Before the edge insertion: \");\n\t\tcount batchSize = 8;\n\t\tcount nBatches = 1, i = 0;\n\t\tfor (count j=0; j<nBatches; j++) {\n\t\t\tstd::vector<GraphEvent> batch;\n\t\t\ti = 0;\n\t\t\twhile (i < batchSize) {\n\t\t\t\tDEBUG(\"Sampling a new edge\");\n\t\t\t\tnode v1 = Sampling::randomNode(G);\n\t\t\t\tnode v2 = Sampling::randomNode(G);\n\t\t\t\tif (v1 != v2 && !G.hasEdge(v1, v2)) {\n\t\t\t\t\ti++;\n\t\t\t\t\tdouble number = distribution(random_generator);\n\t\t\t\t\tG.addEdge(v1, v2, number);\n\t\t\t\t\tbatch.push_back(GraphEvent(GraphEvent::EDGE_ADDITION, v1, v2, number));\n\t\t\t\t}\n\t\t\t}\n\t\t\tDEBUG(\"batch size: \", batch.size());\n\t\t\tDEBUG(\"Updating with dynamic dijkstra\");\n\t\t\tdyn_dij.update(batch);\n\t\t\tDEBUG(\"Running from scratch with dijkstra\");\n\t\t\tdij.run();\n\t\t\tG.forNodes([&] (node i) {\n\t\t\t//\tstd::cout<<\"Node \"<<i<<\":\"<<std::endl;\n\t\t\t//\tstd::cout<<\"Actual distance: \"<<dij.distance(i)<<\", computed distance: \"<<ddij.distance(i)<<std::endl;\n\t\t\t//\tstd::cout<<\"Actual number of paths: \"<<dij.numberOfPaths(i)<<\", computed one: \"<<ddij.numberOfPaths(i)<<std::endl;\n\t\t\t\tEXPECT_EQ(dyn_dij.distance(i), dij.distance(i));\n\t\t\t\tEXPECT_EQ(dyn_dij.numberOfPaths(i), dij.numberOfPaths(i));\n\t\t\t\tif (i != source)\n\t\t\t\t\tassert(dyn_dij.distance(i) != 0);\n\t\t\t//\tEXPECT_EQ(dyn_dij.getPredecessors(i).size(), dij.getPredecessors(i).size());\n\t\t\t});\n\t\t}\n\t});\n}\n\n} /* namespace NetworKit */\n" }, { "alpha_fraction": 0.6243821978569031, "alphanum_fraction": 0.6326194405555725, "avg_line_length": 25.39130401611328, "blob_id": "72901e7cf04a15e868c02434afdf2a8ae562c735", "content_id": "ab18fba6f18fed46908b7168569441fbb22922fd", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1214, "license_type": "permissive", "max_line_length": 116, "num_lines": 46, "path": "/Run.py", "repo_name": 
"networkitproject/networkit-mirror", "src_encoding": "UTF-8", "text": "import sys\nimport os\nimport fnmatch\nfrom datetime import datetime\n\n# run command on all graphs\nargs = sys.argv[1:]\n\ncommandTemplate = args[0]\ndir = args[1]\nif len(args) > 2:\n runs = int(args[2])\nelse:\n runs = 1\n \nprint(\"performing %d runs each\" % runs)\n\ngraphFiles = []\nos.chdir(\"/Users/forigem/Downloads/binary_networks\")\nfor (dirpath, dirnames, filenames) in os.walk(dir):\n for name in fnmatch.filter(filenames, \"*.graph\"):\n path = os.path.join(dirpath, name)\n graphFiles.append(path)\n \ngraphFiles.sort(key=lambda s: s.lower()) # sort case-insensitively\n\n \ncommands = []\nfor graphFile in graphFiles:\n # outFile = \"output/%s-%s.txt\" % (graphFile.split(\"/\")[-1].split(\".\")[0], str(datetime.now()).replace(\" \", \"-\"))\n command = commandTemplate % {\"graphFile\" : graphFile}\n # command = \"%s &> '%s'\" % (command, outFile)\n commands.append(command)\n \nprint(\"Going to call the following %d commands:\" % len(commands))\nfor command in commands:\n print(\"\\t %s\" % command)\n\ncalled = 0\nfor command in commands:\n for r in range(runs):\n print(\"[BEGIN] %s\" % command)\n os.system(command)\n called += 1\n \nprint(\"[DONE] called %d commands\" % called)\n" }, { "alpha_fraction": 0.6717557311058044, "alphanum_fraction": 0.6839694380760193, "avg_line_length": 13.909090995788574, "blob_id": "e81d21c4aa5e942c1f2526dd359345569e34ae85", "content_id": "cb4cdb521d541e386fabd3b1c8fc428c11af5ab7", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 655, "license_type": "permissive", "max_line_length": 53, "num_lines": 44, "path": "/networkit/cpp/linkprediction/test/LinkPredictionGTest.h", "repo_name": "networkitproject/networkit-mirror", "src_encoding": "UTF-8", "text": "/*\n * CommonNeighboursIndex.h\n *\n * Created on: 07.12.2014\n * Author: Kolja Esders ([email protected])\n */\n\n#ifndef NOGTEST\n\n#ifndef LINKPREDICTIONGTEST_H_\n#define LINKPREDICTIONGTEST_H_\n\n#include <gtest/gtest.h>\n\n#include \"../LinkPredictor.h\"\n#include \"../../graph/Graph.h\"\n\nnamespace NetworKit {\n\nclass LinkPredictionGTest : public testing::Test {\n\nprotected:\n Graph G;\n\n Graph trainingGraph;\n\n std::vector<std::pair<node, node>> missingLinks;\n\n std::vector<LinkPredictor::prediction> predictions;\n\npublic:\n LinkPredictionGTest();\n\n void SetUp();\n\n //void TearDown();\n\n};\n\n} // namespace NetworKit\n\n#endif /* LINKPREDICTIONGTEST_H_ */\n\n#endif /* NOGTEST */" }, { "alpha_fraction": 0.6340909004211426, "alphanum_fraction": 0.6522727012634277, "avg_line_length": 18.954545974731445, "blob_id": "55fb62ac7310f9d72e44418586b930e58448bf16", "content_id": "1859dc630527bb0d8b87e68d07d048ba876490b7", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 440, "license_type": "permissive", "max_line_length": 89, "num_lines": 22, "path": "/networkit/cpp/algebraic/AdjacencyMatrix.cpp", "repo_name": "networkitproject/networkit-mirror", "src_encoding": "UTF-8", "text": "/*\n * AdjacencyMatrix.cpp\n *\n * Created on: 28.03.2014\n * Author: Michael Wegner ([email protected])\n */\n\n#include \"AdjacencyMatrix.h\"\n\nnamespace NetworKit {\n\nAdjacencyMatrix::AdjacencyMatrix(const Graph &graph) : Matrix(graph.upperNodeIdBound()) {\n\tgraph.forEdges([&](const node &i, const node &j, double edgeWeight) {\n\t\tsetValue(i, j, edgeWeight);\n\t\tif (!graph.isDirected()) {\n\t\t\tsetValue(j, i, 
edgeWeight);\n\t\t}\n\t});\n}\n\n\n} /* namespace NetworKit */\n\n" }, { "alpha_fraction": 0.6933333277702332, "alphanum_fraction": 0.7051851749420166, "avg_line_length": 15.071428298950195, "blob_id": "865cde175348041727db1a27678458f07f2c5f84", "content_id": "f6ccb5129bf4797c9547d52ebbc7a1f3845663d2", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 675, "license_type": "permissive", "max_line_length": 47, "num_lines": 42, "path": "/networkit/cpp/spanning/PseudoRandomSpanningTree.h", "repo_name": "networkitproject/networkit-mirror", "src_encoding": "UTF-8", "text": "/*\n * PseudoRandomSpanningTree.h\n *\n * Created on: 20.06.2015\n * Author: Henning\n */\n\n#ifndef PSEUDORANDOMSPANNINGTREE_H_\n#define PSEUDORANDOMSPANNINGTREE_H_\n\n#include \"../graph/Graph.h\"\n\nnamespace NetworKit {\n\nstruct MyEdge {\n\tnode from;\n\tnode to;\n\tedgeweight weight;\n\tbool operator<(const MyEdge& other) const {\n\t\t// strict comparison: using >= here would violate the strict weak ordering required by sorting\n\t\treturn this->weight > other.weight;\n\t}\n};\n\nclass PseudoRandomSpanningTree {\npublic:\n\tPseudoRandomSpanningTree(const Graph& G);\n\tvirtual ~PseudoRandomSpanningTree() = default;\n\n\tvoid runShuffle();\n\n\tvoid run();\n\n\tGraph getTree();\n\n\nprivate:\n\tconst Graph& g;\n\tGraph tree;\n};\n\n} /* namespace NetworKit */\n#endif /* PSEUDORANDOMSPANNINGTREE_H_ */\n" }, { "alpha_fraction": 0.5193929076194763, "alphanum_fraction": 0.5703578591346741, "avg_line_length": 25.954545974731445, "blob_id": "ef3c99ed349a65d7d5b90d68453c0f281cbabe32", "content_id": "8088204ce6c54de9d5af6569a0b4e4d61f14d32c", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 5337, "license_type": "permissive", "max_line_length": 92, "num_lines": 198, "path": "/networkit/cpp/viz/PostscriptWriter.cpp", "repo_name": "networkitproject/networkit-mirror", "src_encoding": "UTF-8", "text": "/*\n * PostscriptWriter.cpp\n *\n * Created on: Apr 10, 2013\n * Author: Henning\n */\n\n#include \"PostscriptWriter.h\"\n#include \"../auxiliary/Log.h\"\n\nnamespace NetworKit {\n\nPostscriptWriter::PostscriptWriter(bool isTorus) : wrapAround(isTorus) {\n\tnumColors = 24;\n\n\t// set colors in RGB format, where 0.0 is no color and 1.0 is full color\n\t// (1.0 would mean 255 in a 3x8 bit color scheme)\n\tpsColor = { {1.0, 0.0, 0.0},\n\t\t{\t1.0, 0.5, 0.0}, {1.0, 1.0, 0.0}, {0.5, 1.0, 0.0}, {0.0, 1.0,\n\t\t\t0.0}, {0.0, 1.0, 0.5}, {0.0, 1.0, 1.0}, {0.0, 0.5, 1.0},\n\t\t{\t0.0, 0.0, 1.0}, {0.5, 0.0, 1.0}, {1.0, 0.0, 1.0}, {1.0, 0.0,\n\t\t\t0.5},\n\n\t\t{\t0.6, 0.0, 0.0}, {0.6, 0.3, 0.0}, {0.6, 0.6, 0.0}, {0.3, 0.6,\n\t\t\t0.0}, {0.0, 0.6, 0.0}, {0.0, 0.6, 0.3}, {0.0, 0.6, 0.6},\n\t\t{\t0.0, 0.3, 0.6}, {0.0, 0.0, 0.6}, {0.3, 0.0, 0.6}, {0.6, 0.0,\n\t\t\t0.6}, {0.6, 0.0, 0.3}};\n\n\t// bounding box size\n\tps_size = {1020.0, 1020.0};\n}\n\nvoid PostscriptWriter::writeHeader(std::ofstream& file) {\n\t/* Header */\n\tif (wrapAround) {\n\t\tfile << \"%!PS-Adobe-3.0 EPSF-3.0\\n\";\n\t} else {\n\t\tfile << \"%!PS-Adobe-1.0\\n\";\n\t}\n\tfile << \"%%Title: NetworKit visualization\\n\";\n\tfile << \"%%BoundingBox: 0.000 0.000 \" << ps_size[0] << \" \" << ps_size[1] << \"\\n\";\n\tfile << \"%%EndComments\\n\";\n\tif (! 
wrapAround) {\n\t\tfile << \"%%EndProlog\\n\";\n\t\tfile << \"gsave\\n\";\n\t}\n}\n\nvoid PostscriptWriter::writeMacros(std::ofstream& file) {\n\t/* Macros */\n\tfile << \"/p {newpath} bind def\\n\";\n\tfile << \"/m {moveto} bind def\\n\";\n\tfile << \"/r {rmoveto} bind def\\n\";\n\tfile << \"/k {rlineto} bind def\\n\";\n\tfile << \"/l {lineto} bind def\\n\";\n\tfile << \"/n {rlineto} bind def\\n\";\n\tfile << \"/c {setrgbcolor} bind def\\n\";\n\tfile << \"/s {stroke} bind def\\n\";\n\tfile << \"/w {setlinewidth} bind def\\n\";\n\tfile << \"/h {show} bind def\\n\";\n\tfile << \"/a {arc closepath fill} bind def\\n\";\n\tfile << \"/b {closepath eofill} bind def\\n\";\n}\n\n// TODO: node and edge weights and thicker nodes/edges\nvoid PostscriptWriter::writeClustering(Graph& g, Partition& clustering, std::ofstream& file)\n{\n\t/////////////////////////////////\n\t// bounding box adjustment\n\t/////////////////////////////////\n\tps_min = {g.minCoordinate(0), g.minCoordinate(1)};\n\tps_max = {g.maxCoordinate(0), g.maxCoordinate(1)};\n\tPoint<float> ps_stretch = {ps_size[0] - 2 * ps_border[0], ps_size[1] - 2 * ps_border[1]};\n\n\tTRACE(\"min: \", ps_min.toCsvString());\n\tTRACE(\"max: \", ps_max.toCsvString());\n\tTRACE(\"stretch: \", ps_stretch.toCsvString());\n\n\tauto adjustToBoundingBox([&](Point<float> p) {\n\t\tfor (index c = 0; c < 2; ++c) {\n\t\t\tp[c] -= ps_min[c];\n\t\t\tp[c] *= ps_stretch[c] / (ps_max[c] - ps_min[c]);\n\t\t\tp[c] += ps_border[c];\n\t\t}\n//\t\tTRACE(\"New coordinate: \", p.toCsvString());\n\t\treturn p;\n\t});\n\n\n\t/////////////////////////////////\n\t// wrap-around adjustment\n\t/////////////////////////////////\n\tauto adjust1([&](float& val) { // TODO: externalize constants\n\t\tif (val > 500.0f) {\n\t\t\tval -= 1000.0f;\n\t\t}\n\t\telse if (val < -500.0f) {\n\t\t\tval += 1000.0f;\n\t\t}\n\t});\n\n\tauto adjustWrapAround([&](Point<float>& diff) {\n\t\tadjust1(diff[0]);\n\t\tadjust1(diff[1]);\n\t});\n\n\n\t// draw edges\n\tTRACE(\"start edge loop in writeClustering, wrapAround? 
\", wrapAround);\n\tTRACE(\"num edges: \", g.numberOfEdges());\n\n\tg.forEdges([&](node u, node v) {\n\n\t\t// set edge color\n\t\tif (clustering[u] == clustering[v] && clustering[u] != none) {\n\t\t\t// same cluster\n\t\t\tfloat r = psColor[clustering[u] % numColors].r;\n\t\t\tfloat g = psColor[clustering[u] % numColors].g;\n\t\t\tfloat b = psColor[clustering[u] % numColors].b;\n\t\t\tfile << r << \" \" << g << \" \" << b << \" c \";\n\t\t}\n\t\telse {\n\t\t\t// different clusters -> grey\n\t\t\tfile << \"0.80 0.80 0.80 c 1.0 w \";\n\t\t}\n\n\t\t// set edge start and end point\n\t\tPoint<float> start = adjustToBoundingBox(g.getCoordinate(u));\n\t\tPoint<float> end = adjustToBoundingBox(g.getCoordinate(v));\n\t\tPoint<float> diff = {end[0] - start[0], end[1] - start[1]};\n\t\tif (wrapAround) {\n\t\t\tadjustWrapAround(diff);\n\t\t}\n\t\tend = start;\n\t\tend += diff;\n\n\t\t// write edge to file\n\t\tfile << \"p \" << start.toSsvString() << \" m \" << end.toSsvString() << \" l s\\n\";\n\t});\n\n\n\t// draw vertices\n\tfloat dotsize = 2.0;\n\tg.forNodes([&](node u) {\n\t\tif (clustering[u] != none) {\n\t\t\t// change color\n\t\t\tfloat r = psColor[clustering[u] % numColors].r;\n\t\t\tfloat g = psColor[clustering[u] % numColors].g;\n\t\t\tfloat b = psColor[clustering[u] % numColors].b;\n\t\t\tfile << r << \" \" << g << \" \" << b << \" c \";\n\t\t}\n\t\telse {\n\t\t\tfile << \"0.0 0.0 0.0 c \";\n\t\t}\n\n\t\tPoint<float> point = adjustToBoundingBox(g.getCoordinate(u));\n\n\t\tfile << \"p \" << point.toSsvString() << \" \" << dotsize << \" 0.00 360.00 a s\\n\";\n//\t\tTRACE(\"write coordinate to file: \", point[0], \", \", point[1]);\n\t});\n}\n\nvoid PostscriptWriter::init(std::string path, std::ofstream& file) {\n\tTRACE(\"start ps init\");\n\n\tfile.open(path.c_str());\n\tfile.precision(3);\n\tfile << std::fixed;\n\n\twriteHeader(file);\n\twriteMacros(file);\n\tfile << \"0.000 0.000 0.000 c\\n\";\n}\n\nvoid PostscriptWriter::write(Graph& g, Partition& clustering, std::string path) {\n\tTRACE(\"start ps write clustering\");\n\tassert(g.getCoordinate(0).getDimensions() == 2);\n\n\tstd::ofstream file;\n\tinit(path, file);\n\n\twriteClustering(g, clustering, file);\n\n\tif (! 
wrapAround) {\n\t\tfile << \"grestore\\n\";\n\t}\n\tfile.close();\n}\n\nvoid PostscriptWriter::write(Graph& g, std::string path) {\n\tTRACE(\"start ps write\");\n\tClusteringGenerator gen;\n\tPartition allNone = gen.makeOneClustering(g);\n\twrite(g, allNone, path);\n}\n\n\n} /* namespace NetworKit */\n" }, { "alpha_fraction": 0.6795665621757507, "alphanum_fraction": 0.6919504404067993, "avg_line_length": 17.457143783569336, "blob_id": "5df6b314635cd99d4f7b807389b874621df4842f", "content_id": "dcaaecdd165ac671c17c8063298ba534079874b1", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 646, "license_type": "permissive", "max_line_length": 91, "num_lines": 35, "path": "/networkit/cpp/generators/test/GeneratorsBenchmark.h", "repo_name": "networkitproject/networkit-mirror", "src_encoding": "UTF-8", "text": "/*\n * GeneratorsBenchmark.h\n *\n * Created on: May 29, 2013\n * Author: forigem\n */\n\n#ifndef NOGTEST\n\n#ifndef GENERATORSBENCHMARK_H_\n#define GENERATORSBENCHMARK_H_\n\n#include <gtest/gtest.h>\n\n#include \"../../auxiliary/Timer.h\"\n\nnamespace NetworKit {\n\nclass GeneratorsBenchmark: public testing::Test {\nprotected:\n\ttemplate <typename L>\n\tuint64_t timeOnce(L f) {\n\t\t// TODO should be moved somewhere else (Benchmark parent class or the Timer class itself)\n\t\tAux::Timer timer;\n\t\ttimer.start();\n\t\tf();\n\t\ttimer.stop();\n\t\treturn timer.elapsedMilliseconds();\n\t}\n};\n\n} /* namespace NetworKit */\n#endif /* GENERATORSBENCHMARK_H_ */\n\n#endif /*NOGTEST */\n" }, { "alpha_fraction": 0.6837881207466125, "alphanum_fraction": 0.6982343792915344, "avg_line_length": 21.25, "blob_id": "bc6bbfdaa89124760a915301c8acff359ae9aa3c", "content_id": "d334f73655f7fe5bddc16cea7267baae2ea879fe", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 623, "license_type": "permissive", "max_line_length": 143, "num_lines": 28, "path": "/networkit/cpp/numerics/LinearSolver.cpp", "repo_name": "networkitproject/networkit-mirror", "src_encoding": "UTF-8", "text": "/*\n * LinearSolver.cpp\n *\n * Created on: 30.10.2014\n * Author: Michael Wegner ([email protected])\n */\n\n#include \"LinearSolver.h\"\n\nnamespace NetworKit {\n\nLinearSolver::LinearSolver(double tolerance) : tolerance(tolerance){\n}\n\nLinearSolver::~LinearSolver(){\n}\n\nvoid LinearSolver::setup(const Graph &graph) {\n\tsetup(CSRMatrix::graphLaplacian(graph));\n}\n\nvoid LinearSolver::parallelSolve(const std::vector<Vector> &rhs, std::vector<Vector> &results, count maxConvergenceTime, count maxIterations) {\n\tfor (index i = 0; i < rhs.size(); ++i) {\n\t\tsolve(rhs[i], results[i], maxConvergenceTime, maxIterations);\n\t}\n}\n\n} /* namespace NetworKit */\n" }, { "alpha_fraction": 0.6780141592025757, "alphanum_fraction": 0.7046099305152893, "avg_line_length": 25.355140686035156, "blob_id": "52336263d7775f862c97bbf3c1eeaa3df911ae18", "content_id": "c0775aff269a0fa5e93c134cd886a0b04b766370", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 2820, "license_type": "permissive", "max_line_length": 101, "num_lines": 107, "path": "/networkit/cpp/auxiliary/Random.cpp", "repo_name": "networkitproject/networkit-mirror", "src_encoding": "UTF-8", "text": "/*\n * Random.cpp\n *\n * Created on: 02.01.2014\n * Author: FJW\n */\n\n#include <cmath>\n#include <omp.h>\n#include <limits>\n\n#include \"Random.h\"\n\n// If GCC does not support thread local, we are sad and 
don't use it:\n#ifdef __GNUC__\n#\tif (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8)\n#\t\tdefine AUX_THREAD_LOCAL thread_local\n#\telse\n#\t\tdefine AUX_THREAD_LOCAL\n#\tendif\n#else // we don't know our platform, so don't support it:\n#\tdefine AUX_THREAD_LOCAL \n#endif\n\nnamespace Aux {\nnamespace Random {\n\nstatic bool staticSeed = false;\nstatic uint64_t seedValue = 0;\nstatic uint64_t globalSeedGeneration = 0; // global seed generation, updated on every setSeed-call\nstatic bool seedUseThreadId = false;\n\nvoid setSeed(uint64_t seed, bool useThreadId) {\n\tseedValue = seed;\n\tstaticSeed = true;\n\tseedUseThreadId = useThreadId;\n\t++globalSeedGeneration;\n\tgetURNG(); // update local seed value\n}\n\nuint64_t getSeed() {\n\tif (!staticSeed) {\n\t\tAUX_THREAD_LOCAL static std::random_device urng{};\n\t\tstd::uniform_int_distribution<uint64_t> dist{};\n\t\treturn dist(urng);\n\t} else if (seedUseThreadId) {\n\t\treturn seedValue + omp_get_thread_num();\n\t} else {\n\t\treturn seedValue;\n\t}\n}\n\n\nstd::mt19937_64& getURNG() {\n\tAUX_THREAD_LOCAL static std::mt19937_64 generator{getSeed()};\n\tAUX_THREAD_LOCAL static uint64_t localSeedGeneration = std::numeric_limits<uint64_t>::max();\n\tif (staticSeed && localSeedGeneration != globalSeedGeneration) {\n\t\tgenerator.seed(getSeed());\n\t\tlocalSeedGeneration = globalSeedGeneration;\n\t}\n\treturn generator;\n}\n\nuint64_t integer() {\n\tAUX_THREAD_LOCAL static std::uniform_int_distribution<uint64_t> dist{};\n\treturn dist(getURNG());\n}\nuint64_t integer(uint64_t upperBound) {\n\tstd::uniform_int_distribution<uint64_t> dist{0, upperBound};\n\treturn dist(getURNG());\n}\nuint64_t integer(uint64_t lowerBound, uint64_t upperBound) {\n\tstd::uniform_int_distribution<uint64_t> dist{lowerBound, upperBound};\n\treturn dist(getURNG());\n}\n\ndouble real() {\n\tAUX_THREAD_LOCAL static std::uniform_real_distribution<double> dist{};\n\treturn dist(getURNG());\n}\ndouble real(double upperBound) {\n\tstd::uniform_real_distribution<double> dist{0.0, upperBound};\n\treturn dist(getURNG());\n}\ndouble real(double lowerBound, double upperBound) {\n\tstd::uniform_real_distribution<double> dist{lowerBound, upperBound};\n\treturn dist(getURNG());\n}\n\ndouble probability() {\n\tAUX_THREAD_LOCAL static std::uniform_real_distribution<double> dist{0.0, std::nexttoward(1.0, 2.0)};\n\treturn dist(getURNG());\n}\n\nstd::size_t index(std::size_t max) {\n\tassert(max > 0 && \"There have to be valid indexes\");\n\tstd::uniform_int_distribution<std::size_t> dist{0, max - 1};\n\treturn dist(getURNG());\n}\n\n// uint64_t binomial(double n, double p) {\n// \tstd::binomial_distribution<uint64_t> dist(n, p);\n// \treturn dist(getURNG());\n// }\n\n} // namespace Random\n} // namespace Aux\n" }, { "alpha_fraction": 0.6296296119689941, "alphanum_fraction": 0.6470588445663452, "avg_line_length": 13.34375, "blob_id": "ca31e4e43e15dd2ad46d949b587921c66c436004", "content_id": "affcaa671ed58fc31ba4769877631d6ed7a8d006", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 459, "license_type": "permissive", "max_line_length": 42, "num_lines": 32, "path": "/networkit/cpp/algebraic/test/VectorGTest.h", "repo_name": "networkitproject/networkit-mirror", "src_encoding": "UTF-8", "text": "/*\n * VectorGTest.h\n *\n * Created on: 13.03.2014\n * Author: Michael Wegner ([email protected])\n */\n\n#ifndef NOGTEST\n\n#ifndef VECTORGTEST_H_\n#define VECTORGTEST_H_\n\n#include <gtest/gtest.h>\n#include 
\"../Vector.h\"\n#include \"../Matrix.h\"\n#include \"../../auxiliary/Log.h\"\n#include <cmath>\n\nnamespace NetworKit {\n\nclass VectorGTest : public testing::Test {\npublic:\n\tVectorGTest();\n\tvirtual ~VectorGTest();\n};\n\n\n} /* namespace NetworKit */\n\n#endif /* VECTORGTEST_H_ */\n\n#endif\n" }, { "alpha_fraction": 0.6496519446372986, "alphanum_fraction": 0.6705336570739746, "avg_line_length": 13.862069129943848, "blob_id": "d7d2afee94d61e3ae7254a55b5337b6c16901b8a", "content_id": "408f8bd90a42fb4c59b1bfc4c75e0e1deafaeba0", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 431, "license_type": "permissive", "max_line_length": 66, "num_lines": 29, "path": "/networkit/cpp/centrality/DynCentrality.h", "repo_name": "networkitproject/networkit-mirror", "src_encoding": "UTF-8", "text": "/*\n * DynSSSP.h\n *\n * Created on: 31.07.2014\n * Author: ebergamini\n */\n\n#ifndef DYNCENTRALITY_H_\n#define DYNCENTRALITY_H_\n\n#include \"../dynamics/GraphEvent.h\"\n\nnamespace NetworKit {\n\n/**\n * @ingroup centrality\n * Interface for dynamic centrality algorithms.\n */\nclass DynCentrality {\n\npublic:\n\n virtual void update(const std::vector<GraphEvent>& batch) = 0;\n\n};\n\n} /* namespace NetworKit */\n\n#endif /* DYNCENTRALITY_H_ */\n" }, { "alpha_fraction": 0.3890882432460785, "alphanum_fraction": 0.6910927891731262, "avg_line_length": 42.146942138671875, "blob_id": "8753df37e4513976173243fd4247f40faf9ea7a6", "content_id": "f9923333be67df3099092e43874f650b6d871531", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 43751, "license_type": "permissive", "max_line_length": 188, "num_lines": 1014, "path": "/networkit/cpp/generators/test/GeneratorsGTest.cpp", "repo_name": "networkitproject/networkit-mirror", "src_encoding": "UTF-8", "text": "/*\nDy * GeneratorsTest.cpp\n *\n * Created on: 09.04.2013\n * Author: cls\n */\n\n#ifndef NOGTEST\n\n\n#include \"GeneratorsGTest.h\"\n\n#include <numeric>\n#include <cmath>\n\n#include \"../DynamicGraphSource.h\"\n#include \"../DynamicBarabasiAlbertGenerator.h\"\n#include \"../PubWebGenerator.h\"\n#include \"../DynamicPubWebGenerator.h\"\n#include \"../ErdosRenyiGenerator.h\"\n#include \"../ChungLuGenerator.h\"\n#include \"../HavelHakimiGenerator.h\"\n#include \"../RmatGenerator.h\"\n#include \"../BarabasiAlbertGenerator.h\"\n#include \"../DynamicPathGenerator.h\"\n#include \"../DynamicForestFireGenerator.h\"\n#include \"../DynamicDorogovtsevMendesGenerator.h\"\n#include \"../DorogovtsevMendesGenerator.h\"\n#include \"../WattsStrogatzGenerator.h\"\n#include \"../RegularRingLatticeGenerator.h\"\n#include \"../StochasticBlockmodel.h\"\n#include \"../EdgeSwitchingMarkovChainGenerator.h\"\n#include \"../LFRGenerator.h\"\n\n\n#include \"../../viz/PostscriptWriter.h\"\n#include \"../../community/ClusteringGenerator.h\"\n#include \"../../community/PLP.h\"\n#include \"../../community/PLM.h\"\n#include \"../../io/METISGraphWriter.h\"\n#include \"../../io/DotGraphWriter.h\"\n#include \"../../io/GraphIO.h\"\n#include \"../../io/METISGraphReader.h\"\n#include \"../../community/Modularity.h\"\n#include \"../../dynamics/GraphUpdater.h\"\n#include \"../../auxiliary/MissingMath.h\"\n#include \"../../auxiliary/Parallel.h\"\n#include \"../../auxiliary/Random.h\"\n#include \"../../global/ClusteringCoefficient.h\"\n#include \"../../community/PLM.h\"\n#include \"../../community/Modularity.h\"\n\n\nnamespace NetworKit {\n\nGeneratorsGTest::GeneratorsGTest() 
{\n\n}\n\nTEST_F(GeneratorsGTest, testDynamicBarabasiAlbertGeneratorSingleStep) {\n\tcount k = 2; // number of edges added per node\n\tDynamicGraphSource* gen = new DynamicBarabasiAlbertGenerator(k);\n\tGraphEventProxy* Gproxy = gen->newGraph();\n\tGraph* G = Gproxy->G;\n\n\tgen->initializeGraph();\n\n\tcount nPre = G->numberOfNodes();\n\tcount mPre = G->numberOfEdges();\n\tEXPECT_EQ(k, nPre) << \"graph should have been initialized to k nodes\";\n\tEXPECT_EQ(k - 1, mPre) << \"graph should have been initialized to a path of k nodes which means k-1 edges\";\n\n\t// perform single preferential attachment step\n\tgen->generate();\n\n\tcount nPost = G->numberOfNodes();\n\tcount mPost = G->numberOfEdges();\n\tEXPECT_EQ(nPre + 1, nPost) << \"one more node should have been added\";\n\tEXPECT_EQ(mPre + k, mPost) << \"k edges should have been added\";\n\n\tdelete gen;\n\tdelete Gproxy;\n\tdelete G;\n}\n\nTEST_F(GeneratorsGTest, testDynamicBarabasiAlbertGenerator) {\n\tDynamicGraphSource* gen = new DynamicBarabasiAlbertGenerator(2);\n\n\tGraphEventProxy* Gproxy = gen->newGraph();\n\tGraph* G = Gproxy->G;\n\n\tgen->initializeGraph();\n\n\tEXPECT_EQ(2u, G->numberOfNodes()) << \"initially the generator creates two connected nodes\";\n\tEXPECT_EQ(1u, G->numberOfEdges()) << \"initially the generator creates two connected nodes\";\n\n\tcount n = 100;\n\n\tgen->generateWhile([&]() {\n\t\t\t\treturn ( G->numberOfNodes() < n );\n\t\t\t});\n\n\tEXPECT_EQ(n, G->numberOfNodes());\n\tDEBUG(\"m = \" , G->numberOfEdges());\n\n\t// resume generator\n\n\tgen->generateWhile([&]() {\n\t\treturn (G->numberOfNodes() < 2 * n);\n\t});\n\tEXPECT_EQ(2 * n, G->numberOfNodes());\n\n\tdelete gen;\n\tdelete Gproxy;\n\tdelete G;\n}\n\n\nTEST_F(GeneratorsGTest, viewDynamicBarabasiAlbertGenerator) {\n\tDynamicGraphSource* gen = new DynamicBarabasiAlbertGenerator(2);\n\tGraphEventProxy* Gproxy = gen->newGraph();\n\tGraph* G = Gproxy->G;\n\tgen->initializeGraph();\n\tcount n = 42;\n\tgen->generateWhile([&]() {\n\t\t\t\treturn ( G->numberOfNodes() < n );\n\t\t\t});\n\tMETISGraphWriter writer;\n\twriter.write(*G, \"output/BATest.graph\");\n\n\tdelete gen;\n\tdelete Gproxy;\n\tdelete G;\n}\n\nTEST_F(GeneratorsGTest, testStaticPubWebGenerator) {\n\tcount n = 1800;\n\tcount numCluster = 24;\n\tcount maxNumNeighbors = 36;\n\tfloat rad = 0.075;\n\n\tPubWebGenerator gen(n, numCluster, rad, maxNumNeighbors);\n\tGraph G = gen.generate();\n\tEXPECT_EQ(n, G.numberOfNodes()) << \"number of generated nodes\";\n\n\t// check degree\n\tG.forNodes([&](node v) {\n\t\tEXPECT_LE(G.degree(v), maxNumNeighbors) << \"maximum degree\";\n\t});\n\n\t// 1-clustering\n\tClusteringGenerator clusterGen;\n\tPartition oneClustering = clusterGen.makeOneClustering(G);\n\tEXPECT_EQ(G.numberOfNodes(),oneClustering.numberOfElements());\n\n\t// output to EPS file\n\tPostscriptWriter psWriter(true);\n\tpsWriter.write(G, oneClustering, \"output/pubweb.eps\");\n\n\t// clustering\n\tPLM clusterAlgo(G);\n\tclusterAlgo.run();\n\tPartition clustering = clusterAlgo.getPartition();\n\tEXPECT_EQ(G.numberOfNodes(),clustering.numberOfElements());\n\tpsWriter.write(G, clustering, \"output/pubweb-clustered-PLM.eps\");\n\n\tModularity mod;\n\tdouble modVal = mod.getQuality(clustering, G);\n\tEXPECT_GE(modVal, 0.2) << \"modularity of clustering\";\n\tDEBUG(\"Modularity of clustering: \" , modVal);\n\tDEBUG(\"Total edge weight: \" , G.totalEdgeWeight());\n\tEXPECT_TRUE(G.checkConsistency());\n}\n\n\nTEST_F(GeneratorsGTest, testDynamicPubWebGenerator) {\n//\tcount nSteps = 100;\n//\tcount n 
= 1200;\n\tcount nSteps = 15;\n\tcount n = 300;\n\tcount numCluster = 30;\n\tcount maxNumNeighbors = 40;\n\tfloat rad = 0.08;\n\n\tDynamicPubWebGenerator dynGen(n, numCluster, rad, maxNumNeighbors, false);\n\tGraph G = dynGen.getGraph();\n\tGraphUpdater gu(G);\n\tstd::vector<GraphEvent> stream;\n\n\t// static clustering algorithm for better visual output\n\tPostscriptWriter psWriter(true);\n\tpsWriter.write(G, \"output/pubweb-0000.eps\");\n\n\tfor (index i = 1; i <= nSteps; ++i) {\n\t\tstream = dynGen.generate(1);\n\t\tDEBUG(\"updating graph\");\n\t\tgu.update(stream);\n\t\tG.initCoordinates();\n\n\t\tDEBUG(\"updated graph, new (n, m) = (\" , G.numberOfNodes() , \", \" , G.numberOfEdges() , \")\");\n\t\tedgeweight tew = G.totalEdgeWeight();\n\t\tDEBUG(\"1/2 graph volume: \", tew);\n\t\tEXPECT_GT(tew, 0);\n\n\t\t// update coordinates\n\t\tstd::map<node, Point<float> > newCoordinates = dynGen.getNewCoordinates();\n\t\tfor (std::map<node, Point<float> >::iterator iter = newCoordinates.begin();\n\t\t\t\titer != newCoordinates.end(); ++iter) {\n\t\t\tnode v = iter->first;\n\t\t\tPoint<float> p = iter->second;\n\t\t\tG.setCoordinate(v, p);\n\t\t}\n\n\t\t// output for visual inspection\n\t\tchar path[23];\n\t\tsprintf(path, \"output/pubweb-%04llu.eps\", static_cast<unsigned long long>(i));\n\t\tTRACE(\"path: \" , path);\n\t\tpsWriter.write(G, path);\n\t}\n}\n\n/**\n * Testing the dynamic hyperbolic generator with fixed parameters and changing node positions\n */\nTEST_F(GeneratorsGTest, testDynamicHyperbolicGeneratorOnMovedNodes) {\n\t//set up dynamic parameters\n\tint nSteps = 20;\n\tconst count n = 1000;\n\tconst double k = 6;\n\tconst double alpha = 1;\n\t//const double exp = 2*alpha+1;\n\tconst double T = 0;\n\tconst double R = HyperbolicSpace::getTargetRadius(n, n*k/2, alpha, T);\n\n\tdouble movedShare = 1;\n\tdouble moveDistance = 0.1;\n\n\t//set up initial node positions\n\tvector<double> angles(n, -1);\n\tvector<double> radii(n, -1);\n\tHyperbolicSpace::fillPoints(angles, radii, R, alpha);\n\tDynamicHyperbolicGenerator dynGen(angles, radii, R, alpha, T, movedShare, moveDistance);\n\n\t//generate starting graph\n\tGraph G = HyperbolicGenerator().generate(angles, radii, R);\n\tcount initialEdgeCount = G.numberOfEdges();\n\tcount expected = n*HyperbolicSpace::getExpectedDegree(n, alpha, R)*0.5;\n\tEXPECT_NEAR(initialEdgeCount, expected, expected/5);\n\tGraphUpdater gu(G);\n\tstd::vector<GraphEvent> stream;\n\n\tfor (int i = 0; i < nSteps; i++) {\n\t\t//move nodes and generate stream of affected edges\n\t\tstream = dynGen.generate(1);\n\t\tDEBUG(\"Edges: \", G.numberOfEdges());\n\t\tfor (auto event : stream) {\n\t\t\tEXPECT_TRUE(event.type == GraphEvent::EDGE_REMOVAL || event.type == GraphEvent::EDGE_ADDITION || event.type == GraphEvent::TIME_STEP);\n\t\t\tif (event.type == GraphEvent::EDGE_REMOVAL) {\n\t\t\t\tEXPECT_TRUE(G.hasEdge(event.u, event.v));\n\t\t\t}\n\t\t\t//only present nodes can be affected, no new nodes are introduced\n\t\t\tif (event.type != GraphEvent::TIME_STEP) EXPECT_LT(event.u, G.upperNodeIdBound());\n\t\t}\n\t\tgu.update(stream);\n\t\tEXPECT_TRUE(G.checkConsistency());\n\t}\n\n\t//update moved nodes\n\tangles = getAngles(dynGen);\n\tradii = getRadii(dynGen);\n\tGraph comparison = HyperbolicGenerator().generate(angles, radii, R);\n\tEXPECT_EQ(G.numberOfEdges(), comparison.numberOfEdges());\n\n\t//heuristic criterion: Number of edges may change, but should not change much\n\tEXPECT_NEAR(G.numberOfEdges(), initialEdgeCount, initialEdgeCount/5);\n}\n\n/**\n * creates a series 
of pictures visualizing the effect of the dynamic hyperbolic generator\n */\nTEST_F(GeneratorsGTest, testDynamicHyperbolicVisualization) {\n\tcount n = 300;\n\tcount nSteps = 20;\n\n\tconst double k = 6;\n\tconst double alpha = 1;\n\t//const double exp = 2*alpha+1;\n\tconst double T = 0;\n\tconst double R = HyperbolicSpace::getTargetRadius(n, n*k/2, alpha, T);\n\n\tdouble movedShare = 0.2;\n\tdouble moveDistance = 1;\n\tvector<double> angles(n);\n\tvector<double> radii(n);\n\n\tHyperbolicSpace::fillPoints(angles, radii, R, alpha);\n\n\tDynamicHyperbolicGenerator dynGen(angles, radii, R, alpha, T, movedShare, moveDistance);\n\tGraph G = dynGen.getGraph();\n\n\tGraphUpdater gu(G);\n\tstd::vector<GraphEvent> stream;\n\tG.initCoordinates();\n\tPostscriptWriter psWriter(true);\n\tpsWriter.write(G, \"output/hyperbolic-0000.eps\");\n\n\tfor (index i = 0; i < nSteps; i++) {\n\t\tstream = dynGen.generate(1);\n\t\tDEBUG(\"Edges: \", G.numberOfEdges());\n\t\tfor (auto event : stream) {\n\t\t\tEXPECT_TRUE(event.type == GraphEvent::EDGE_REMOVAL || event.type == GraphEvent::EDGE_ADDITION || event.type == GraphEvent::TIME_STEP);\n\t\t}\n\t\tgu.update(stream);\n\t\tG.initCoordinates();\n\n\t\tauto coords = dynGen.getCoordinates();\n\t\tfor (index j = 0; j < coords.size(); j++) {\n\t\t\tG.setCoordinate(j, coords[j]);\n\t\t}\n\n\t\t// output for visual inspection\n\t\tchar path[27];//TODO: come on, this is ridiculous!\n\t\tsprintf(path, \"output/hyperbolic-%04llu.eps\", static_cast<unsigned long long>(i));\n\t\tTRACE(\"path: \" , path);\n\t\tpsWriter.write(G, path);\n\t}\n}\n\nTEST_F(GeneratorsGTest, testBarabasiAlbertGeneratorOriginal) {\n\tcount k = 3;\n\tcount nMax = 100;\n\tcount n0 = 3;\n\n\tBarabasiAlbertGenerator BarabasiAlbert(k, nMax, n0, false);\n\tGraph G = BarabasiAlbert.generate();\n\tEXPECT_FALSE(G.isEmpty());\n\n\tEXPECT_EQ(nMax, G.numberOfNodes());\n\tEXPECT_EQ( ((n0-1) + ((nMax - n0) * k)), G.numberOfEdges());\n\tEXPECT_TRUE(G.checkConsistency());\n\n\tGraph initGraph(4);\n\tinitGraph.addEdge(0,1);\n\tinitGraph.addEdge(2,1);\n\tinitGraph.addEdge(2,3);\n\tinitGraph.addEdge(0,3);\n\tBarabasiAlbert = BarabasiAlbertGenerator(k, nMax, initGraph, false);\n\tG = BarabasiAlbert.generate();\n\n\tEXPECT_EQ(nMax, G.numberOfNodes());\n\tEXPECT_EQ(G.numberOfEdges(), (nMax - initGraph.numberOfNodes()) * k + initGraph.numberOfEdges());\n\tEXPECT_TRUE(G.checkConsistency());\n}\n\nTEST_F(GeneratorsGTest, testBarabasiAlbertGeneratorConstructor) {\n\t// k > nMax\n\tEXPECT_THROW(BarabasiAlbertGenerator generator(10, 9, 8, false), std::runtime_error);\n\tEXPECT_THROW(BarabasiAlbertGenerator generator(10, 9, 8, true), std::runtime_error);\n\n\t// n0 > nMax\n\tEXPECT_THROW(BarabasiAlbertGenerator generator(5, 9, 10, false), std::runtime_error);\n\tEXPECT_THROW(BarabasiAlbertGenerator generator(5, 9, 10, true), std::runtime_error);\n\n\t// n0 = initGraph.numberOfNodes() > nMax\n\tGraph initGraph(10);\n\tEXPECT_THROW(BarabasiAlbertGenerator generator(6, 9, initGraph, false), std::runtime_error);\n\tEXPECT_THROW(BarabasiAlbertGenerator generator(6, 9, initGraph, true), std::runtime_error);\n\n\t// initGraph, k > nMax\n\tinitGraph = Graph(6);\n\tEXPECT_THROW(BarabasiAlbertGenerator generator(10, 9, initGraph, false), std::runtime_error);\n\tEXPECT_THROW(BarabasiAlbertGenerator generator(10, 9, initGraph, true), std::runtime_error);\n\n\t// initGraph, original method, initGraph.numberOfNodes() < k\n\tEXPECT_THROW(BarabasiAlbertGenerator generator(8, 9, initGraph, false), std::runtime_error);\n\n\t// initGraph does not 
have consecutive node ids\n\tinitGraph.removeNode(0);\n\tEXPECT_THROW(BarabasiAlbertGenerator generator(3, 9, initGraph, false), std::runtime_error);\n\tEXPECT_THROW(BarabasiAlbertGenerator generator(3, 9, initGraph, true), std::runtime_error);\n}\n\nTEST_F(GeneratorsGTest, testBarabasiAlbertGeneratorBatagelj) {\n\tcount k = 3;\n\tcount nMax = 100;\n\tcount n0 = 3;\n\n\tBarabasiAlbertGenerator BarabasiAlbert(k, nMax, n0, true);\n\tGraph G = BarabasiAlbert.generate();\n\n\tEXPECT_EQ(nMax, G.numberOfNodes());\n\tEXPECT_LE(G.numberOfEdges(), nMax * k);\n\tEXPECT_TRUE(G.checkConsistency());\n\n\tGraph initGraph(4);\n\tinitGraph.addEdge(0,1);\n\tinitGraph.addEdge(2,1);\n\tinitGraph.addEdge(2,3);\n\tinitGraph.addEdge(0,3);\n\tBarabasiAlbert = BarabasiAlbertGenerator(k, nMax, initGraph, true);\n\tG = BarabasiAlbert.generate();\n\n\tEXPECT_EQ(nMax, G.numberOfNodes());\n\tEXPECT_LE(G.numberOfEdges(), nMax * k);\n\tEXPECT_TRUE(G.checkConsistency());\n\n}\n\nTEST_F(GeneratorsGTest, generateBarabasiAlbertGeneratorGraph) {\n\t\tcount k = 3;\n\t\tcount nMax = 1000;\n\t\tcount n0 = 3;\n\n\t\tBarabasiAlbertGenerator BarabasiAlbert(k, nMax, n0);\n\n\t\tGraph G = BarabasiAlbert.generate();\n\t\tGraphIO io;\n\t\tio.writeAdjacencyList(G, \"output/\"\n\t\t\t\t\"BarabasiGraph.txt\");\n}\n\nTEST_F(GeneratorsGTest, testDynamicPathGenerator) {\n\tDynamicPathGenerator gen;\n\tauto stream = gen.generate(42);\n#if LOG_LEVEL == LOG_LEVEL_TRACE\n\tfor (auto ev : stream) {\n\t\tTRACE(ev.toString());\n\t}\n#endif\n}\n\nTEST_F(GeneratorsGTest, testErdosRenyiGenerator) {\n\tcount n = 2000;\n\tdouble p = 1.5 * (log(n) / (double) n);\n\n\tErdosRenyiGenerator generator(n, p);\n\tGraph G = generator.generate();\n\tEXPECT_EQ(n, G.numberOfNodes());\n\tEXPECT_FALSE(G.isEmpty());\n\tEXPECT_TRUE(G.checkConsistency());\n\n\tcount nPairs = (n * (n-1)) / 2;\n\tcount nEdges = G.numberOfEdges();\n\tEXPECT_GE(nEdges, 0.75 * p * nPairs);\n\tEXPECT_LE(nEdges, 1.25 * p * nPairs);\n\n\tDEBUG(\"Number of edges with probability \" , p , \" (actual/expected): \" , nEdges , \" / \" , (nPairs * p));\n\tEXPECT_TRUE(G.checkConsistency());\n}\n\nTEST_F(GeneratorsGTest, testRmatGeneratorException) {\n\tcount scale = 9;\n\tcount edgeFactor = 12;\n\tdouble a = 0.51;\n\tdouble b = 0.12;\n\tdouble c = 0.12;\n\tdouble d = 0.2;\n\n\tEXPECT_THROW(RmatGenerator rmat(scale, edgeFactor, a, b, c, d), std::runtime_error);\n}\n\nTEST_F(GeneratorsGTest, testRmatGenerator) {\n\tcount scale = 9;\n\tcount n = (1 << scale);\n\tcount edgeFactor = 12;\n\tdouble a = 0.51;\n\tdouble b = 0.12;\n\tdouble c = 0.12;\n\tdouble d = 0.25;\n\n\tRmatGenerator rmat(scale, edgeFactor, a, b, c, d);\n\tGraph G = rmat.generate();\n\n\tEXPECT_EQ(G.numberOfNodes(), n);\n\tEXPECT_LE(G.numberOfEdges(), n * edgeFactor);\n\n\tClusteringCoefficient cc;\n\tdouble ccex = cc.exactGlobal(G);\n\tEXPECT_LE(ccex, 0.4);\n\n\tPLM clusterer(G, true);\n\tclusterer.run();\n\tPartition zeta = clusterer.getPartition();\n\tModularity mod;\n\tdouble modVal = mod.getQuality(zeta, G);\n\tINFO(\"Modularity of R-MAT graph clustering: \", modVal);\n\tEXPECT_GE(modVal, 0.0);\n\tEXPECT_TRUE(G.checkConsistency());\n}\n\n\nTEST_F(GeneratorsGTest, testChungLuGenerator) {\n\tcount n = 400;\n\tcount maxDegree = n / 8;\n\tstd::vector<count> sequence(n);\n\tcount expVolume = 0;\n\tcount actualVolume = 0;\n\n\t// fill sequence with random values (this is not power-law, of course!)\n\tfor (index i = 0; i < n; ++i) {\n\t\tsequence[i] = rand() % maxDegree;\n\t\texpVolume += sequence[i];\n\t}\n\n\tChungLuGenerator 
gen(sequence);\n\tGraph G = gen.generate();\n\tEXPECT_TRUE(G.checkConsistency());\n\n\tEXPECT_EQ(n, G.numberOfNodes());\n\tG.forNodes([&](node v) {\n\t\tactualVolume += G.degree(v);\n\t});\n\n\tINFO(\"expected volume: \", expVolume, \", actual volume: \", actualVolume);\n}\n\nTEST_F(GeneratorsGTest, testHavelHakimiGeneratorOnRandomSequence) {\n\tcount n = 400;\n\tcount maxDegree = n / 10;\n\tstd::vector<count> sequence(n);\n//\tstd::vector<count> sequence = {5, 4, 4, 3, 2, 2, 2, 2, 2, 2};\n\tbool realizable = false;\n\n\tdo {\n\t\t// fill sequence with random values (this is not power-law, of course!)\n\t\tfor (index i = 0; i < n; ++i) {\n\t\t\tsequence[i] = rand() % maxDegree;\n\t\t}\n\n\t\t// check if sequence is realizable\n\t\tHavelHakimiGenerator hhgen(sequence);\n\t\trealizable = hhgen.isRealizable();\n\n\t\tif (realizable) {\n\t\t\tGraph G = hhgen.generate();\n\t\t\tEXPECT_TRUE(G.checkConsistency());\n\t\t\tcount volume = std::accumulate(sequence.begin(), sequence.end(), 0);\n\t\t\tEXPECT_EQ(volume, 2 * G.numberOfEdges());\n\t\t}\n\t} while (! realizable);\n}\n\nTEST_F(GeneratorsGTest, testHavelHakimiGeneratorOnRealSequence) {\n\tMETISGraphReader reader;\n\tstd::vector<std::string> graphs = {\"input/jazz.graph\",\n\t\t\t\"input/lesmis.graph\"}; //, \"input/PGPgiantcompo.graph\", \"input/coAuthorsDBLP.graph\"};\n\n\tfor (auto path : graphs) {\n\t\tGraph G = reader.read(path);\n\t\tcount n = G.numberOfNodes();\n\t\tstd::vector<count> sequence(n);\n\t\tG.forNodes([&](node u){\n\t\t\tsequence[u] = G.degree(u);\n\n\t\t});\n\n\t\tHavelHakimiGenerator hhgen(sequence);\n\t\tGraph G2 = hhgen.generate();\n\t\tEXPECT_TRUE(G.checkConsistency());\n\n\t\tcount volume = std::accumulate(sequence.begin(), sequence.end(), 0);\n\t\tEXPECT_EQ(volume, 2 * G2.numberOfEdges());\n\n\t\tif (volume < 50000) {\n\t\t\tstd::vector<count> testSequence(n);\n\t\t\tG2.forNodes([&](node u){\n\t\t\t\ttestSequence[u] = G2.degree(u);\n\t\t\t});\n\n\t\t\tfor (index i = 0; i < n; ++i) {\n\t\t\t\tEXPECT_EQ(sequence[i], testSequence[i]);\n\t\t\t}\n\t\t}\n\t}\n}\n\nTEST_F(GeneratorsGTest, testHavelHakimiGeneratorOnUnrealizableSequence) {\n\tstd::vector<count> seq = {20, 10, 2, 2, 2, 2, 2, 2, 2, 2, 2};\n\n\tHavelHakimiGenerator hhgen(seq);\n\tEXPECT_THROW(hhgen.generate(), std::runtime_error);\n\n\thhgen = HavelHakimiGenerator(seq, true);\n\tGraph G = hhgen.generate();\n\n\tG.forNodes([&](node u) {\n\t\tEXPECT_EQ(std::min<count>(seq[u], 10), G.degree(u));\n\t});\n}\n\n\nTEST_F(GeneratorsGTest, testDynamicForestFireGenerator) {\n\tGraph G1(0);\n\tGraphUpdater gu1(G1);\n\tstd::vector<GraphEvent> stream;\n\tDynamicForestFireGenerator ffg1(0.0, false);\n\tstream = ffg1.generate(10);\n\tgu1.update(stream);\n\tEXPECT_TRUE(G1.checkConsistency());\n\tEXPECT_EQ(10u, G1.numberOfNodes());\n\tG1.forNodes([&](node u) {\n\t\tcount c = 0;\n\t\tG1.forNeighborsOf(u, [&](node v) {\n\t\t\tif (v < u) {\n\t\t\t\tc += 1;\n\t\t\t}\n\t\t});\n\t\tif (u == 0) {\n\t\t\tEXPECT_EQ(0u, c);\n\t\t} else {\n\t\t\tEXPECT_EQ(1u, c);\n\t\t}\n\t});\n\n\tGraph G2(0);\n\tGraphUpdater gu2(G2);\n\tDynamicForestFireGenerator ffg2(1.0, true, 1.0);\n\tstream = ffg2.generate(10);\n\tgu2.update(stream);\n\tEXPECT_TRUE(G2.checkConsistency());\n\tEXPECT_EQ(10u, G2.numberOfNodes());\n\tG2.forNodePairs([&](node u, node v) {\n\t\tif (v < u) {\n\t\t\tEXPECT_TRUE(G2.hasEdge(u,v));\n\t\t}\n\t});\n\tstream = ffg2.generate(10);\n\tgu2.update(stream);\n\tEXPECT_EQ(20u, G2.numberOfNodes());\n}\n\nTEST_F(GeneratorsGTest, testRegularRingLatticeGenerator) {\n\tint n0 = 10;\n\tint 
neighbors = 2;\n\tauto testRingLattice = [&](Graph G) {\n\t\tEXPECT_EQ(n0, (int) G.numberOfNodes());\n\t\tEXPECT_EQ(n0 * neighbors, (int) G.numberOfEdges());\n\t\tG.forNodePairs([&](node u, node v) {\n\t\t\tint diff = std::abs((int) u- (int) v);\n\t\t\tif (u != v && (diff <= neighbors || diff >= n0 - neighbors)) {\n\t\t\t\tEXPECT_TRUE(G.hasEdge(u,v));\n\t\t\t} else {\n\t\t\t\tEXPECT_FALSE(G.hasEdge(u,v));\n\t\t\t}\n\t\t});\n\t};\n\n\tRegularRingLatticeGenerator rrlg = RegularRingLatticeGenerator(n0, neighbors);\n\ttestRingLattice(rrlg.generate());\n}\n\nTEST_F(GeneratorsGTest, testWattsStrogatzGenerator) {\n\tint n0 = 10;\n\tint neighbors = 2;\n\tauto testRingLattice = [&](Graph G) {\n\t\tG.forNodePairs([&](node u, node v) {\n\t\t\tint diff = std::abs((int) u- (int) v);\n\t\t\tif (u != v && (diff <= neighbors || diff >= n0 - neighbors)) {\n\t\t\t\tEXPECT_TRUE(G.hasEdge(u,v));\n\t\t\t} else {\n\t\t\t\tEXPECT_FALSE(G.hasEdge(u,v));\n\t\t\t}\n\t\t});\n\t};\n\n\tWattsStrogatzGenerator wsg1 = WattsStrogatzGenerator(n0, neighbors, 0.0);\n\ttestRingLattice(wsg1.generate());\n\n\tWattsStrogatzGenerator wsg2 = WattsStrogatzGenerator(n0, neighbors, 0.3);\n\tGraph G = wsg2.generate();\n\tEXPECT_TRUE(G.checkConsistency());\n\tEXPECT_EQ(n0, (int) G.numberOfNodes());\n\tEXPECT_EQ(n0*neighbors, (int) G.numberOfEdges());\n}\n\nTEST_F(GeneratorsGTest, testDorogovtsevMendesGenerator) {\n\tint n0 = 20;\n\tDorogovtsevMendesGenerator dmg = DorogovtsevMendesGenerator(n0);\n\tGraph G = dmg.generate();\n\n\tEXPECT_EQ(n0, (int) G.numberOfNodes());\n\tEXPECT_EQ(2 * n0 - 3, (int) G.numberOfEdges());\n\tG.forNodes([&](node u) {\n\t\tcount c = 0;\n\t\tG.forNeighborsOf(u, [&](node v) {\n\t\t\tif (v < u) {\n\t\t\t\tc += 1;\n\t\t\t}\n\t\t});\n\t\tif (u <= 2) {\n\t\t\tEXPECT_EQ(u, c);\n\t\t} else {\n\t\t\tEXPECT_EQ(2u, c);\n\t\t}\n\t});\n\tEXPECT_TRUE(G.checkConsistency());\n}\n\nTEST_F(GeneratorsGTest, testDynamicDorogovtsevMendesGenerator) {\n\tcount n0 = 20;\n\tDynamicDorogovtsevMendesGenerator ddmg = DynamicDorogovtsevMendesGenerator();\n\tGraph G(0);\n\tGraphUpdater gu(G);\n\tstd::vector<GraphEvent> stream;\n\tstream = ddmg.generate(n0 - 3);\n\tgu.update(stream);\n\n\tEXPECT_EQ(n0, G.numberOfNodes());\n\tEXPECT_EQ(2*n0-3, G.numberOfEdges());\n\tG.forNodes([&](node u) {\n\t\tcount c = 0;\n\t\tG.forNeighborsOf(u, [&](node v) {\n\t\t\tif (v < u) {\n\t\t\t\tc += 1;\n\t\t\t}\n\t\t});\n\t\tif (u <= 2) {\n\t\t\tEXPECT_EQ(u, c);\n\t\t} else {\n\t\t\tEXPECT_EQ(2u, c);\n\t\t}\n\t});\n}\n\n\n\nTEST_F(GeneratorsGTest, testStochasticBlockmodel) {\n\tcount n = 10;\n\tcount nBlocks = 2;\n\tstd::vector<index> membership = {0, 0, 0, 0, 0, 1, 1, 1, 1, 1};\n\tstd::vector<std::vector<double> > affinity = {{1.0, 0.0}, {0.0, 1.0}};\n\tStochasticBlockmodel sbm(n, nBlocks, membership, affinity);\n\tGraph G = sbm.generate();\n\n\tEXPECT_EQ(n, G.numberOfNodes());\n\tEXPECT_EQ(20u, G.numberOfEdges());\n}\n\n/**\n * Test whether points generated in hyperbolic space fulfill basic constraints\n */\nTEST_F(GeneratorsGTest, testHyperbolicPointGeneration) {\n\tcount n = 1000;\n\tdouble stretch = Aux::Random::real(0.5,1.5);\n\tdouble alpha = Aux::Random::real(0.5,1.5);\n\tdouble R = HyperbolicSpace::hyperbolicAreaToRadius(n)*stretch;\n\tvector<double> angles(n, -1);\n\tvector<double> radii(n, -1);\n\tHyperbolicSpace::fillPoints(angles, radii, R, alpha);\n\tfor (index i = 0; i < n; i++) {\n\t\tEXPECT_GE(angles[i], 0);\n\t\tEXPECT_LT(angles[i], 2*M_PI);\n\t\tEXPECT_GE(radii[i], 0);\n\t\tEXPECT_LE(radii[i], R);\n\t}\n}\n\n/**\n * Test whether the number 
edges generated by the hyperbolic generator agree at least roughly with theory\n */\nTEST_F(GeneratorsGTest, testHyperbolicGenerator) {\n\tcount n = 100000;\n\tdouble k = 32;\n\tcount m = k*n/2;\n\tHyperbolicGenerator gen(n,k,7);\n\tGraph G = gen.generate();\n\tEXPECT_EQ(G.numberOfNodes(), n);\n\tEXPECT_TRUE(G.checkConsistency());\n\tEXPECT_NEAR(G.numberOfEdges(), m, m/5);\n}\n\n/**\n * Check consistency of graphs generated by the hyperbolic generator\n */\nTEST_F(GeneratorsGTest, testHyperbolicGeneratorConsistency) {\n\tcount n = 10000;\n\tdouble k = 6;\n\tcount m = n*k/2;\n\tHyperbolicGenerator gen(n, k);\n\tGraph G = gen.generate();\n\tEXPECT_NEAR(G.numberOfEdges(), m, m/5);\n\tASSERT_TRUE(G.checkConsistency());\n}\n\nTEST_F(GeneratorsGTest, testHyperbolicGeneratorMechanicGraphs) {\n\tcount n = 10000;\n\tdouble k = 6;\n\tcount m = n*k/2;\n\tHyperbolicGenerator gen(n, k, 3, 0.14);\n\tGraph G = gen.generate();\n\tEXPECT_NEAR(G.numberOfEdges(), m, m/10);\n\tASSERT_TRUE(G.checkConsistency());\n}\n\nTEST_F(GeneratorsGTest, testConfigurationModelGeneratorOnRealSequence) {\n\tMETISGraphReader reader;\n\tstd::vector<std::string> graphs = {\"input/jazz.graph\",\n\t\t\t\"input/lesmis.graph\"}; //, \"input/PGPgiantcompo.graph\", \"input/coAuthorsDBLP.graph\"};\n\n\tfor (auto path : graphs) {\n\t\tGraph G = reader.read(path);\n\t\tcount n = G.numberOfNodes();\n\t\tstd::vector<count> sequence(n);\n\t\tG.forNodes([&](node u){\n\t\t\tsequence[u] = G.degree(u);\n\t\t});\n\n\t\tbool skipTest = false;\n\t\tEdgeSwitchingMarkovChainGenerator gen(sequence, skipTest);\n\t\tGraph G2 = gen.generate();\n\n\t\tcount volume = std::accumulate(sequence.begin(), sequence.end(), 0);\n\t\tEXPECT_EQ(volume, 2 * G2.numberOfEdges());\n\n\t\tif (volume < 50000) {\n\t\t\tstd::vector<count> testSequence(n);\n\t\t\tG2.forNodes([&](node u){\n\t\t\t\ttestSequence[u] = G2.degree(u);\n\t\t\t});\n\t\t\tAux::Parallel::sort(testSequence.begin(), testSequence.end(), std::greater<count>());\n\t\t\tAux::Parallel::sort(sequence.begin(), sequence.end(), std::greater<count>());\n\n\t\t\tfor (index i = 0; i < n; ++i) {\n\t\t\t\tEXPECT_EQ(sequence[i], testSequence[i]);\n\t\t\t}\n\t\t}\n\t}\n}\n\nTEST_F(GeneratorsGTest, tryHyperbolicHighTemperatureGraphs) {\n\tcount n = 10000;\n\tdouble k = 10;\n\tdouble gamma = 3;\n\tcount m = n*k/2;\n\tfor (double T = 0; T < 10; T += 0.1) {\n\t\tif (std::abs(T-1) < 0.00001) continue;\n\t\tHyperbolicGenerator gen(n, k, gamma, T);\n\t\tGraph G = gen.generate();\n\t\tEXPECT_NEAR(G.numberOfEdges(), m, m/10);\n\t}\n}\n\nTEST_F(GeneratorsGTest, tryGiganticCollectionOfHyperbolicTemperatureGraphs) {\n\tfor (index i = 0; i < 30; i++) {\n\t\tcount n = 10000;\n\t\tdouble k = 10;\n\t\tdouble T = 0.1;\n\t\tcount m = n*k/2;\n\t\tHyperbolicGenerator gen(n, k, 3, T);\n\t\tGraph G = gen.generate();\n\t\tEXPECT_NEAR(G.numberOfEdges(), m, m/10);\n\t\t//EXPECT_TRUE(G.checkConsistency());\n\t}\n}\n\nTEST_F(GeneratorsGTest, tryGiganticCollectionOfHyperbolicUnitDiskGraphs) {\n\tcount n = 1000000;\n\tdouble k = 1;\n\tfor (index i = 0; i < 7; i++) {\n\t\tcount m = n*k/2;\n\t\tHyperbolicGenerator gen(n, k, 7);\n\t\tGraph G = gen.generate();\n\t\tEXPECT_NEAR(G.numberOfEdges(), m, m/5);\n\t\tEXPECT_TRUE(G.checkConsistency());\n\t\tk *= 2;\n\t}\n}\n\nTEST_F(GeneratorsGTest, testLFRGenerator) {\n\tAux::Random::setSeed(42, true);\n\tLFRGenerator gen(1000);\n\tgen.generatePowerlawDegreeSequence(20, 50, -2);\n\tgen.generatePowerlawCommunitySizeSequence(10, 50, -1);\n\tgen.setMu(0.5);\n\tgen.run();\n\tGraph G1 = 
gen.getMoveGraph();\n\tgen.run(); // should rewire the edges but nothing else\n\tGraph G2 = gen.getMoveGraph();\n\tEXPECT_EQ(1000, G1.numberOfNodes());\n\tEXPECT_EQ(1000, G2.numberOfNodes());\n\tEXPECT_EQ(G1.numberOfEdges(), G2.numberOfEdges());\n}\n\nTEST_F(GeneratorsGTest, tryLFRGeneratorImpossibleSequence) {\n\tAux::Random::setSeed(42, true);\n\tLFRGenerator gen(1000);\n\tgen.generatePowerlawDegreeSequence(35, 98, -2);\n\tgen.generatePowerlawCommunitySizeSequence(10, 50, -3);\n\tgen.setMu(0.5);\n\tEXPECT_THROW(gen.run(), std::runtime_error);\n\tEXPECT_THROW(gen.getMoveGraph(), std::runtime_error);\n}\n\nTEST_F(GeneratorsGTest, testLFRGeneratorWithRealData) {\n\tstd::vector<count> degreeSequence = {124, 54, 7, 15, 81, 35, 4, 37, 3, 33, 2, 79, 2, 47, 40, 102, 17, 75, 2, 43, 49, 11, 12, 3, 87, 44, 67, 18, 22, 51, 96, 9, 17, 1, 1, 81, 3,\n\t44, 59, 2, 30, 85, 69, 28, 45, 12, 30, 38, 32, 20, 11, 28, 42, 30, 8, 86, 57, 56, 50, 51, 39, 80, 47, 124, 41, 15, 5, 3, 100, 57, 34, 37, 7, 2, 1, 11, 75, 137, 47, 11, 67, 92,\n\t67, 34, 156, 33, 8, 29, 35, 203, 20, 2, 51, 2, 54, 69, 14, 4, 1, 1, 72, 22, 45, 11, 34, 2, 21, 7, 4, 25, 1, 16, 63, 86, 25, 143, 45, 49, 23, 80, 3, 21, 1, 11, 14, 38, 73,\n\t14, 9, 45, 45, 117, 58, 5, 10, 33, 25, 28, 20, 41, 37, 95, 15, 57, 114, 84, 1, 22, 61, 22, 126, 132, 40, 77, 84, 64, 12, 95, 2, 39, 67, 40, 1, 30, 80, 57, 62, 48, 1, 64, 56,\n\t3, 26, 46, 79, 53, 38, 16, 26, 71, 3, 1, 49, 1, 18, 62, 39, 117, 9, 81, 50, 38, 4, 9, 68, 76, 61, 51, 49, 50, 8, 84, 56, 2, 59, 2, 1, 29, 28, 109, 33, 12, 37, 45, 12, 39,\n\t26, 42, 18, 54, 1, 11, 194, 39, 24, 65, 30, 59, 56, 16, 16, 41, 12, 30, 26, 60, 4, 13, 43, 14, 44, 62, 63, 60, 54, 9, 27, 53, 85, 33, 98, 107, 54, 7, 106, 38, 172, 18,\n\t30, 32, 56, 2, 58, 1, 132, 56, 30, 61, 2, 54, 31, 1, 13, 9, 113, 83, 27, 171, 120, 74, 25, 22, 48, 24, 51, 46, 26, 9, 31, 15, 58, 12, 33, 39, 56, 77, 1, 51, 11, 2, 89, 40,\n\t15, 47, 52, 1, 39, 104, 41, 45, 14, 57, 17, 53, 45, 10, 1, 1, 36, 40, 79, 45, 76, 3, 64, 3, 6, 45, 6, 153, 24, 47, 62, 52, 80, 23, 4, 51, 108, 4, 70, 80, 40, 37, 38, 20, 7,\n\t30, 7, 5, 9, 59, 35, 27, 144, 22, 23, 10, 9, 1, 98, 19, 2, 53, 37, 41, 53, 47, 22, 6, 63, 1, 142, 8, 99, 48, 144, 62, 28, 20, 67, 7, 84, 52, 28, 16, 8, 10, 65, 88, 26, 24, 49,\n\t1, 49, 48, 10, 2, 20, 102, 1, 3, 3, 63, 21, 42, 34, 21, 22, 2, 12, 22, 74, 9, 104, 53, 24, 68, 28, 132, 88, 40, 46, 138, 41, 2, 24, 2, 79, 11, 75, 46, 61, 59, 21, 92, 1, 32, 16,\n\t68, 9, 48, 66, 100, 58, 51, 35, 52, 14, 22, 23, 39, 121, 55, 105, 2, 38, 41, 70, 41, 13, 30, 110, 13, 82, 12, 25, 29, 59, 9, 6, 35, 63, 2, 20, 56, 97, 22, 44, 27, 135, 66, 85, 115,\n\t31, 40, 119, 24, 28, 65, 24, 9, 18, 103, 82, 13, 36, 102, 67, 41, 2, 104, 79, 4, 2, 11, 1, 35, 16, 28, 90, 62, 6, 119, 64, 100, 47, 20, 80, 55, 32, 45, 5, 89, 61, 75, 2, 58, 28, 35,\n\t1, 150, 1, 82, 34, 1, 65, 97, 61, 2, 44, 3, 35, 76, 6, 13, 7, 9, 67, 81, 6, 56, 5, 70, 70, 48, 50, 37, 96, 48, 47, 71, 138, 18, 31, 2, 88, 18, 15, 108, 65, 9, 9, 79, 4, 70, 14, 86,\n\t5, 16, 1, 44, 1, 32, 26, 22, 44, 23, 103, 104, 82, 115, 30, 9, 38, 43, 3, 38, 11, 26, 62, 40, 32, 29, 66, 9, 39, 4, 66, 14, 26, 19, 14, 33, 121, 13, 23, 20, 41, 40, 68, 10, 160, 60,\n\t105, 156, 4, 27, 2, 120, 136, 61, 2, 32, 11, 46, 23, 157, 20, 41, 64, 35, 23, 32, 36, 46, 79, 12, 68, 30, 68, 45, 34, 102, 20, 66, 76, 32, 76, 31, 7, 2, 13, 184, 33, 73, 18, 43, 35,\n\t141, 43, 77, 7, 9, 28, 39, 27, 44, 109, 4, 8, 41, 30, 3, 48, 8, 160, 33, 65, 14, 100, 34, 2, 94, 35, 19, 45, 19, 30, 59, 30, 11, 5, 1, 73, 19, 48, 1, 248, 15, 13, 3, 1, 36, 99, 95, 50,\n\t72, 
57, 68, 11, 42, 8, 40, 5, 68, 12, 34, 15, 93, 14, 63, 22, 6, 152, 55, 35, 60, 14, 13, 5, 49, 2, 1, 79, 59, 29, 86, 5, 71, 2, 23, 9, 64, 33, 77, 19, 23, 61, 39, 9, 30, 55,\n\t23, 42, 16, 108, 42, 40};\n\tstd::vector<count> partition = {1, 7, 3, 7, 1, 5, 13, 3, 7, 3, 8, 1, 16, 3, 2, 2, 10, 6, 11, 6, 7, 6, 3, 14, 3, 8, 2, 7, 2, 1, 5, 1, 4, 1, 18, 1, 14, 3, 1, 10, 4, 5, 1, 8, 7, 4,\n\t4, 4, 2, 10, 14, 2, 5, 6, 1, 5, 5, 4, 3, 1, 3, 6, 6, 5, 2, 8, 7, 6, 5, 5, 1, 1, 3, 16, 8, 2, 2, 6, 1, 12, 1, 3, 3, 5, 4, 6, 10, 3, 8, 1, 4, 4, 6, 4, 2, 1, 11, 4, 7, 1, 8, 3, 6,\n\t10, 4, 16, 1, 7, 6, 10, 2, 4, 4, 4, 6, 4, 4, 1, 6, 1, 12, 7, 1, 7, 8, 8, 5, 13, 6, 3, 4, 1, 5, 8, 7, 3, 9, 4, 9, 1, 3, 4, 2, 5, 4, 1, 18, 8, 3, 3, 4, 5, 1, 1, 2, 5, 3, 4, 8, 1,\n\t7, 7, 7, 7, 1, 3, 6, 3, 19, 1, 8, 7, 5, 7, 5, 3, 2, 5, 6, 3, 7, 3, 5, 5, 7, 5, 4, 2, 11, 4, 2, 5, 3, 6, 6, 2, 9, 1, 3, 5, 7, 8, 2, 10, 9, 17, 6, 8, 3, 6, 5, 3, 7, 9, 2, 2, 1, 1,\n\t4, 5, 13, 1, 3, 1, 10, 7, 5, 3, 1, 2, 3, 1, 3, 9, 6, 3, 11, 8, 2, 1, 5, 6, 9, 2, 6, 6, 2, 1, 1, 5, 5, 7, 7, 7, 6, 6, 5, 3, 6, 2, 2, 3, 2, 12, 4, 1, 6, 1, 13, 6, 1, 11, 1, 13, 3,\n\t1, 8, 6, 1, 1, 3, 2, 4, 9, 5, 6, 2, 7, 5, 2, 6, 8, 4, 2, 2, 8, 6, 9, 6, 8, 3, 6, 7, 4, 6, 1, 2, 8, 7, 3, 1, 1, 6, 5, 4, 10, 6, 6, 6, 6, 4, 8, 5, 1, 4, 1, 12, 3, 11, 1, 3, 10, 4,\n\t5, 1, 3, 6, 5, 2, 3, 1, 5, 1, 1, 6, 1, 4, 6, 6, 8, 7, 3, 9, 1, 7, 1, 1, 6, 8, 3, 9, 3, 6, 3, 7, 4, 10, 4, 7, 15, 3, 3, 4, 5, 1, 1, 1, 4, 5, 2, 2, 6, 8, 5, 4, 4, 12, 10, 6, 2, 9,\n\t4, 2, 7, 8, 7, 4, 6, 10, 2, 15, 12, 6, 3, 3, 7, 1, 3, 10, 4, 13, 2, 1, 3, 6, 3, 8, 2, 2, 4, 2, 6, 3, 2, 6, 4, 10, 4, 1, 3, 3, 7, 1, 6, 7, 1, 19, 4, 2, 6, 4, 7, 5, 2, 7, 2, 2, 3,\n\t2, 8, 2, 6, 4, 1, 1, 10, 7, 4, 3, 1, 9, 6, 5, 4, 2, 2, 1, 7, 1, 7, 8, 2, 2, 12, 1, 5, 1, 1, 6, 8, 1, 2, 1, 4, 6, 4, 2, 6, 8, 2, 2, 7, 1, 6, 5, 3, 4, 1, 1, 5, 17, 3, 1, 9, 4, 8,\n\t7, 8, 7, 4, 1, 2, 11, 2, 6, 5, 4, 7, 4, 3, 6, 7, 4, 5, 1, 5, 12, 5, 1, 2, 1, 2, 4, 8, 1, 4, 3, 6, 5, 12, 3, 9, 8, 2, 11, 6, 4, 7, 5, 4, 11, 1, 4, 1, 6, 3, 6, 9, 1, 4, 8, 2, 5, 4,\n\t5, 7, 2, 1, 3, 4, 1, 4, 7, 5, 8, 5, 8, 5, 8, 8, 2, 1, 10, 6, 7, 1, 8, 2, 3, 2, 6, 6, 4, 11, 3, 7, 3, 8, 10, 2, 3, 1, 3, 6, 4, 1, 4, 6, 6, 8, 4, 5, 3, 2, 5, 3, 4, 4, 5, 6, 10, 9,\n\t3, 4, 1, 2, 2, 5, 12, 3, 5, 4, 3, 8, 5, 4, 6, 4, 10, 5, 1, 4, 5, 1, 6, 3, 5, 4, 4, 6, 5, 1, 8, 4, 3, 3, 3, 6, 4, 4, 1, 7, 4, 8, 4, 2, 3, 4, 1, 2, 5, 4, 11, 2, 7, 1, 2, 5, 3, 12,\n\t4, 3, 3, 7, 1, 7, 3, 4, 7, 7, 5, 1, 2, 7, 3, 2, 5, 4, 4, 3, 9, 1, 4, 1, 3, 6, 8, 6, 2, 9, 4, 7, 2, 6, 3, 3, 6, 3, 2, 4, 4, 2, 6, 2, 4, 5, 2, 4, 5, 6, 3, 1, 7, 4, 4, 2, 3, 1, 8,\n\t3, 7, 3, 15, 13, 1, 8, 2, 3, 2, 6, 14, 1, 4, 1, 5, 5, 2, 5, 3, 9, 8, 7, 5, 2, 2, 2, 3, 6, 1};\n\tstd::vector<double> mu = {0.6209677419354839, 0.6851851851851851, 0.2857142857142857, 0.6, 0.5308641975308642, 0.2857142857142857, 0.75, 0.3783783783783784, 0.6666666666666667,\n\t0.4545454545454546, 0.5, 0.4177215189873418, 0.0, 0.3191489361702128, 0.275, 0.5196078431372548, 0.3529411764705882, 0.7066666666666667, 0.5, 0.3023255813953488, 0.7142857142857143,\n\t0.2727272727272727, 0.08333333333333337, 0.0, 0.3448275862068966, 0.5909090909090908, 0.5970149253731343, 0.5, 0.2727272727272727, 0.2941176470588235, 0.375, 0.0, 0.23529411764705888,\n\t0.0, 0.0, 0.308641975308642, 0.33333333333333337, 0.09090909090909094, 0.4067796610169492, 0.5, 0.16666666666666663, 0.3529411764705882, 0.21739130434782605, 0.3928571428571429,\n\t0.6444444444444444, 0.6666666666666667, 0.4, 0.07894736842105265, 0.28125, 0.30000000000000004, 0.7272727272727273, 0.4642857142857143, 0.26190476190476186, 0.5, 
0.375,\n\t0.36046511627906974, 0.3508771929824561, 0.2678571428571429, 0.12, 0.196078431372549, 0.23076923076923073, 0.4125, 0.34042553191489366, 0.6532258064516129, 0.31707317073170727,\n\t0.5333333333333333, 0.4, 0.0, 0.62, 0.08771929824561409, 0.4117647058823529, 0.16216216216216217, 0.1428571428571429, 0.0, 0.0, 0.2727272727272727, 0.52, 0.5547445255474452,\n\t0.3191489361702128, 0.36363636363636365, 0.35820895522388063, 0.7282608695652174, 0.34328358208955223, 0.08823529411764708, 0.7371794871794872, 0.1515151515151515, 0.125,\n\t0.5862068965517242, 0.6, 0.5172413793103448, 0.19999999999999996, 0.5, 0.4901960784313726, 0.0, 0.7777777777777778, 0.2028985507246377, 0.7142857142857143, 0.75, 0.0, 0.0,\n\t0.75, 0.2727272727272727, 0.37777777777777777, 0.09090909090909094, 0.20588235294117652, 0.0, 0.4285714285714286, 0.5714285714285714, 0.75, 0.43999999999999995, 0.0, 0.625,\n\t0.23809523809523814, 0.5813953488372092, 0.19999999999999996, 0.4475524475524476, 0.19999999999999996, 0.12244897959183676, 0.5217391304347826, 0.13749999999999996, 0.33333333333333337,\n\t0.47619047619047616, 0.0, 0.36363636363636365, 0.3571428571428571, 0.4473684210526315, 0.1917808219178082, 0.7142857142857143, 0.2222222222222222, 0.5111111111111111, 0.3111111111111111,\n\t0.23931623931623935, 0.4655172413793104, 0.8, 0.8, 0.5757575757575757, 0.64, 0.1428571428571429, 0.8, 0.31707317073170727, 0.5135135135135135, 0.5789473684210527, 0.06666666666666665,\n\t0.5263157894736843, 0.7192982456140351, 0.40476190476190477, 0.0, 0.7727272727272727, 0.3114754098360656, 0.13636363636363635, 0.5714285714285714, 0.5681818181818181, 0.475,\n\t0.1428571428571429, 0.5357142857142857, 0.40625, 0.16666666666666663, 0.49473684210526314, 0.5, 0.3846153846153846, 0.5970149253731343, 0.625, 0.0, 0.5, 0.30000000000000004,\n\t0.4035087719298246, 0.3548387096774194, 0.10416666666666663, 0.0, 0.5625, 0.5178571428571428, 0.6666666666666667, 0.23076923076923073, 0.6521739130434783, 0.6075949367088608,\n\t0.7924528301886793, 0.3157894736842105, 0.375, 0.6923076923076923, 0.676056338028169, 0.0, 0.0, 0.44897959183673475, 0.0, 0.4444444444444444, 0.27419354838709675, 0.5641025641025641,\n\t0.3931623931623932, 0.5555555555555556, 0.49382716049382713, 0.14, 0.3157894736842105, 0.0, 0.4444444444444444, 0.47058823529411764, 0.5, 0.8524590163934427, 0.4117647058823529,\n\t0.22448979591836737, 0.16000000000000003, 0.5, 0.7261904761904762, 0.2857142857142857, 0.5, 0.8305084745762712, 0.5, 0.0, 0.48275862068965514, 0.3571428571428571, 0.5688073394495412,\n\t0.24242424242424243, 0.41666666666666663, 0.6486486486486487, 0.7333333333333334, 0.33333333333333337, 0.33333333333333337, 0.23076923076923073, 0.19047619047619047,\n\t0.16666666666666663, 0.40740740740740744, 0.0, 0.4545454545454546, 0.7371134020618557, 0.2564102564102564, 0.5416666666666667, 0.8, 0.3666666666666667, 0.30508474576271183,\n\t0.4464285714285714, 0.125, 0.3125, 0.24390243902439024, 0.25, 0.7, 0.11538461538461542, 0.4, 0.5, 0.46153846153846156, 0.39534883720930236, 0.2857142857142857, 0.31818181818181823,\n\t0.467741935483871, 0.8253968253968254, 0.01666666666666672, 0.6666666666666667, 0.4444444444444444, 0.4814814814814815, 0.41509433962264153, 0.6, 0.36363636363636365, 0.4387755102040817,\n\t0.6448598130841121, 0.537037037037037, 0.4285714285714286, 0.5377358490566038, 0.2894736842105263, 0.6046511627906976, 0.11111111111111116, 0.6666666666666667, 0.1875, 0.3928571428571429,\n\t0.5, 0.3448275862068966, 0.0, 0.5833333333333333, 0.5178571428571428, 0.30000000000000004, 
0.180327868852459, 0.5, 0.42592592592592593, 0.32258064516129037, 0.0, 0.07692307692307687,\n\t0.8888888888888888, 0.6283185840707964, 0.5060240963855422, 0.5185185185185186, 0.6842105263157895, 0.3916666666666667, 0.6486486486486487, 0.43999999999999995, 0.13636363636363635,\n\t0.375, 0.7916666666666666, 0.2941176470588235, 0.5869565217391304, 0.23076923076923073, 0.4444444444444444, 0.3870967741935484, 0.4, 0.5, 0.41666666666666663, 0.5454545454545454,\n\t0.15384615384615385, 0.625, 0.7532467532467533, 0.0, 0.803921568627451, 0.36363636363636365, 0.0, 0.4606741573033708, 0.625, 0.19999999999999996, 0.3829787234042553, 0.6346153846153846,\n\t0.0, 0.2564102564102564, 0.6442307692307692, 0.8780487804878049, 0.37777777777777777, 0.0, 0.4035087719298246, 0.4117647058823529, 0.3584905660377359, 0.28888888888888886,\n\t0.19999999999999996, 0.0, 0.0, 0.25, 0.275, 0.49367088607594933, 0.7555555555555555, 0.5394736842105263, 0.0, 0.5625, 0.33333333333333337, 0.6666666666666667, 0.3555555555555555,\n\t0.5, 0.5163398692810457, 0.125, 0.7659574468085106, 0.33870967741935487, 0.23076923076923073, 0.3375, 0.5217391304347826, 0.5, 0.3137254901960784, 0.5555555555555556, 0.5,\n\t0.5571428571428572, 0.75, 0.25, 0.21621621621621623, 0.21052631578947367, 0.19999999999999996, 0.1428571428571429, 0.5333333333333333, 0.5714285714285714, 0.6, 0.2222222222222222,\n\t0.423728813559322, 0.7714285714285715, 0.14814814814814814, 0.7847222222222222, 0.2272727272727273, 0.21739130434782605, 0.19999999999999996, 0.5555555555555556, 0.0, 0.8775510204081632,\n\t0.3157894736842105, 0.0, 0.6226415094339622, 0.5405405405405406, 0.6341463414634146, 0.6981132075471699, 0.12765957446808507, 0.6818181818181819, 0.6666666666666667, 0.3492063492063492,\n\t0.0, 0.6619718309859155, 0.625, 0.4242424242424242, 0.22916666666666663, 0.5138888888888888, 0.5, 0.3214285714285714, 0.050000000000000044, 0.4626865671641791, 0.2857142857142857,\n\t0.7380952380952381, 0.6153846153846154, 0.3928571428571429, 0.1875, 0.5, 0.09999999999999998, 0.4769230769230769, 0.3522727272727273, 0.6923076923076923, 0.20833333333333337,\n\t0.24489795918367352, 0.0, 0.6326530612244898, 0.625, 0.0, 0.5, 0.5, 0.5686274509803921, 0.0, 0.33333333333333337, 0.6666666666666667, 0.31746031746031744, 0.19047619047619047,\n\t0.7380952380952381, 0.2647058823529411, 0.5714285714285714, 0.5, 0.0, 0.75, 0.09090909090909094, 0.3918918918918919, 0.33333333333333337, 0.5096153846153846, 0.13207547169811318,\n\t0.75, 0.4852941176470589, 0.1428571428571429, 0.49242424242424243, 0.43181818181818177, 0.275, 0.28260869565217395, 0.644927536231884, 0.2682926829268293, 0.0, 0.5, 0.5,\n\t0.430379746835443, 0.4545454545454546, 0.72, 0.4782608695652174, 0.4426229508196722, 0.35593220338983056, 0.38095238095238093, 0.44565217391304346, 0.0, 0.3125, 0.125,\n\t0.5735294117647058, 0.4444444444444444, 0.6458333333333333, 0.4545454545454546, 0.5700000000000001, 0.7758620689655172, 0.37254901960784315, 0.34285714285714286, 0.6153846153846154,\n\t0.5, 0.4545454545454546, 0.4782608695652174, 0.3076923076923077, 0.6611570247933884, 0.4545454545454546, 0.2952380952380952, 0.5, 0.736842105263158, 0.3902439024390244,\n\t0.6142857142857143, 0.41463414634146345, 0.6923076923076923, 0.5333333333333333, 0.40909090909090906, 0.07692307692307687, 0.3292682926829268, 0.5833333333333333, 0.6,\n\t0.4137931034482759, 0.3728813559322034, 0.4444444444444444, 0.33333333333333337, 0.3142857142857143, 0.6984126984126984, 0.0, 0.55, 0.5714285714285714, 0.4639175257731959,\n\t0.13636363636363635, 0.7045454545454546, 
0.7037037037037037, 0.6370370370370371, 0.5151515151515151, 0.3529411764705882, 0.4782608695652174, 0.7096774193548387, 0.4, 0.7058823529411764,\n\t0.5416666666666667, 0.5357142857142857, 0.6153846153846154, 0.29166666666666663, 0.6666666666666667, 0.5555555555555556, 0.7184466019417476, 0.47560975609756095, 0.23076923076923073,\n\t0.13888888888888884, 0.4411764705882353, 0.25373134328358204, 0.1707317073170732, 0.5, 0.40384615384615385, 0.6075949367088608, 0.5, 0.5, 0.09090909090909094, 0.0, 0.6, 0.6875,\n\t0.1785714285714286, 0.5444444444444445, 0.33870967741935487, 0.33333333333333337, 0.4369747899159664, 0.28125, 0.64, 0.14893617021276595, 0.44999999999999996, 0.35, 0.6, 0.75,\n\t0.5111111111111111, 0.6, 0.6179775280898876, 0.4590163934426229, 0.29333333333333333, 0.0, 0.31034482758620685, 0.3928571428571429, 0.4, 0.0, 0.6266666666666667, 0.0, 0.7560975609756098,\n\t0.38235294117647056, 0.0, 0.3076923076923077, 0.6701030927835052, 0.2295081967213115, 0.5, 0.2272727272727273, 0.33333333333333337, 0.6857142857142857, 0.42105263157894735,\n\t0.33333333333333337, 0.5384615384615384, 0.5714285714285714, 0.4444444444444444, 0.34328358208955223, 0.4814814814814815, 0.16666666666666663, 0.5535714285714286, 0.19999999999999996,\n\t0.6428571428571428, 0.6857142857142857, 0.125, 0.21999999999999997, 0.8648648648648649, 0.42708333333333337, 0.5833333333333333, 0.5531914893617021, 0.3380281690140845,\n\t0.572463768115942, 0.38888888888888884, 0.5483870967741935, 0.0, 0.5681818181818181, 0.16666666666666663, 0.1333333333333333, 0.5555555555555556, 0.2153846153846154, 0.0,\n\t0.33333333333333337, 0.30379746835443033, 0.75, 0.6714285714285715, 0.5, 0.4534883720930233, 0.19999999999999996, 0.5, 0.0, 0.25, 0.0, 0.25, 0.2692307692307693, 0.18181818181818177,\n\t0.5, 0.30434782608695654, 0.4563106796116505, 0.29807692307692313, 0.4512195121951219, 0.6782608695652175, 0.30000000000000004, 0.6666666666666667, 0.42105263157894735,\n\t0.7674418604651163, 0.0, 0.736842105263158, 0.2727272727272727, 0.3076923076923077, 0.19354838709677424, 0.35, 0.125, 0.5517241379310345, 0.19696969696969702, 0.11111111111111116,\n\t0.4871794871794872, 0.5, 0.6515151515151515, 0.7857142857142857, 0.2692307692307693, 0.26315789473684215, 0.0714285714285714, 0.5757575757575757, 0.6859504132231404,\n\t0.23076923076923073, 0.04347826086956519, 0.35, 0.5609756097560976, 0.22499999999999998, 0.8529411764705882, 0.8, 0.775, 0.5, 0.4095238095238095, 0.6346153846153846, 0.25,\n\t0.33333333333333337, 0.0, 0.6416666666666666, 0.5220588235294117, 0.3770491803278688, 0.5, 0.75, 0.2727272727272727, 0.7173913043478262, 0.34782608695652173, 0.6178343949044586,\n\t0.4, 0.19512195121951215, 0.234375, 0.37142857142857144, 0.6086956521739131, 0.6875, 0.5555555555555556, 0.4130434782608695, 0.3417721518987342, 0.0, 0.5441176470588236,\n\t0.5, 0.4117647058823529, 0.4666666666666667, 0.4411764705882353, 0.48039215686274506, 0.30000000000000004, 0.6363636363636364, 0.6447368421052632, 0.65625, 0.5263157894736843,\n\t0.4516129032258065, 0.1428571428571429, 0.5, 0.23076923076923073, 0.8206521739130435, 0.696969696969697, 0.4794520547945206, 0.6666666666666667, 0.2325581395348837, 0.4571428571428572,\n\t0.6382978723404256, 0.2558139534883721, 0.6753246753246753, 0.5714285714285714, 0.2222222222222222, 0.6785714285714286, 0.33333333333333337, 0.14814814814814814, 0.6136363636363636,\n\t0.4128440366972477, 0.75, 0.5, 0.24390243902439024, 0.16666666666666663, 0.6666666666666667, 0.375, 0.75, 0.65625, 0.5757575757575757, 0.6153846153846154, 0.5, 0.64, 
0.08823529411764708,\n\t0.0, 0.7765957446808511, 0.6857142857142857, 0.3157894736842105, 0.2666666666666667, 0.3157894736842105, 0.3666666666666667, 0.576271186440678, 0.7666666666666666, 0.18181818181818177,\n\t0.6, 0.0, 0.17808219178082196, 0.5789473684210527, 0.6666666666666667, 0.0, 0.6774193548387097, 0.6666666666666667, 0.6923076923076923, 0.6666666666666667, 0.0, 0.19444444444444442,\n\t0.5858585858585859, 0.5684210526315789, 0.36, 0.1527777777777778, 0.24561403508771928, 0.38235294117647056, 0.5454545454545454, 0.23809523809523814, 0.25, 0.4, 0.19999999999999996,\n\t0.16176470588235292, 0.5, 0.17647058823529416, 0.4666666666666667, 0.3763440860215054, 0.1428571428571429, 0.47619047619047616, 0.7272727272727273, 0.33333333333333337,\n\t0.618421052631579, 0.34545454545454546, 0.37142857142857144, 0.3833333333333333, 0.3571428571428571, 0.0, 0.4, 0.34693877551020413, 0.5, 0.0, 0.49367088607594933, 0.6610169491525424,\n\t0.1724137931034483, 0.2093023255813954, 0.6, 0.6056338028169015, 0.0, 0.26086956521739135, 0.2222222222222222, 0.25, 0.3939393939393939, 0.3246753246753247, 0.21052631578947367,\n\t0.4782608695652174, 0.2622950819672131, 0.7435897435897436, 0.4444444444444444, 0.6, 0.2727272727272727, 0.4782608695652174, 0.26190476190476186, 0.1875, 0.6018518518518519,\n\t0.3571428571428571, 0.050000000000000044};\n\tPartition C(partition.size());\n\tC.setUpperBound(20);\n\tfor (node u = 0; u < partition.size(); ++u) {\n\t\tC[u] = partition[u];\n\t}\n\tLFRGenerator gen(degreeSequence.size());\n\tgen.setDegreeSequence(degreeSequence);\n\tgen.setPartition(C);\n\tgen.setMu(mu);\n\tgen.run();\n}\n\n\n} /* namespace NetworKit */\n\n#endif /*NOGTEST */\n" }, { "alpha_fraction": 0.6725146174430847, "alphanum_fraction": 0.6818713545799255, "avg_line_length": 16.100000381469727, "blob_id": "0270ec091f12dd5324b0b03a5139e49f738fb97d", "content_id": "dc883cc4a72737f51eba13dd55cd8362397213e8", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 855, "license_type": "permissive", "max_line_length": 78, "num_lines": 50, "path": "/networkit/cpp/scd/GCE.h", "repo_name": "networkitproject/networkit-mirror", "src_encoding": "UTF-8", "text": "/* GCE.h\n *\n * Created on: 06.05.2013\n * Author: cls\n */\n\n\n#ifndef GCE_H_\n#define GCE_H_\n\n#include <unordered_set>\n\n#include \"SelectiveCommunityDetector.h\"\n#include \"../auxiliary/SetIntersector.h\"\n\n\nnamespace NetworKit {\n\n\n/**\n * The Greedy Community Expansion algorithm.\n *\n * Greedily adds nodes from the shell to improve community quality.\n */\nclass GCE: public NetworKit::SelectiveCommunityDetector {\n\npublic:\n\n\tGCE(const Graph& G, std::string objective);\n\n\n\tstd::map<node, std::set<node> > run(std::set<unsigned int>& seeds) override;\n\n\t/**\n\t * @param[in]\ts\tseed node\n\t *\n\t * @param[out]\t\tcommunity as a set of nodes\n\t */\n\tstd::set<node> expandSeed(node s);\n\nprotected:\n\n std::string objective; // name of objective function\n Aux::SetIntersector<node> intersector; // efficient set intersections\n\n\n};\n\n} /* namespace NetworKit */\n#endif\n" }, { "alpha_fraction": 0.6616174578666687, "alphanum_fraction": 0.6667522192001343, "avg_line_length": 27.639705657958984, "blob_id": "43a611c027f72349191caa173b98a36e89481d92", "content_id": "b42c6d703bf5e19fea58d7eb638524dc5ac821e5", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 3895, "license_type": "permissive", "max_line_length": 186, "num_lines": 
136, "path": "/networkit/cpp/coarsening/ParallelPartitionCoarsening.cpp", "repo_name": "networkitproject/networkit-mirror", "src_encoding": "UTF-8", "text": "/*\n * ParallelPartitionCoarsening.cpp\n *\n * Created on: 28.01.2014\n * Author: cls\n */\n\n#include \"ParallelPartitionCoarsening.h\"\n#include <omp.h>\n#include \"../graph/GraphBuilder.h\"\n#include \"../auxiliary/Timer.h\"\n#include \"../auxiliary/Log.h\"\n\nnamespace NetworKit {\n\nParallelPartitionCoarsening::ParallelPartitionCoarsening(const Graph& G, const Partition& zeta, bool useGraphBuilder) : GraphCoarsening(G), zeta(zeta),\tuseGraphBuilder(useGraphBuilder) {\n\n}\n\nvoid ParallelPartitionCoarsening::run() {\n\tAux::Timer timer;\n\ttimer.start();\n\n\tPartition nodeToSuperNode = zeta;\n\tnodeToSuperNode.compact((zeta.upperBound() <= G.upperNodeIdBound())); // use turbo if the upper id bound is <= number of nodes\n\tcount nextNodeId = nodeToSuperNode.upperBound();\n\n\tGraph Gcombined;\n\tif (!useGraphBuilder) {\n\t\tGraph Ginit(nextNodeId, true); // initial graph containing supernodes\n\n\t\t// make copies of initial graph\n\t\tcount nThreads = omp_get_max_threads();\n\t\tstd::vector<Graph> localGraphs(nThreads, Ginit); // thread-local graphs\n\n\n\t\t// iterate over edges of G and create edges in coarse graph or update edge and node weights in Gcon\n\t\tDEBUG(\"create edges in coarse graphs\");\n\t\tG.parallelForEdges([&](node u, node v, edgeweight ew) {\n\t\t\tindex t = omp_get_thread_num();\n\n\t\t\tnode su = nodeToSuperNode[u];\n\t\t\tnode sv = nodeToSuperNode[v];\n\t\t\tlocalGraphs.at(t).increaseWeight(su, sv, ew);\n\n\t\t});\n\n\n\t\tAux::Timer timer2;\n\t\ttimer2.start();\n\t\t// combine local graphs in parallel\n\t\t// Graph Gcombined(Ginit.numberOfNodes(), true); //\n\t\tGcombined = Graph(Ginit.numberOfNodes(), true);\n\n\t\tstd::vector<count> numEdges(nThreads);\n\n\n\t\t// access internals of Graph to write adjacencies\n\t\tauto threadSafeIncreaseWeight = [&](node u, node v, edgeweight ew) {\n\n\t\t\tindex vi = Gcombined.indexInOutEdgeArray(u, v);\n\t\t\tif (vi == none) {\n\t\t\t\tindex t = omp_get_thread_num();\n\t\t\t\tif (u == v) {\n\t\t\t\t\tnumEdges[t] += 2;\n\t\t\t\t} else {\n\t\t\t\t\tnumEdges[t] += 1; // normal edges count half\n\t\t\t\t}\n\t\t\t\tGcombined.outDeg[u]++;\n\t\t\t\tGcombined.outEdges[u].push_back(v);\n\t\t\t\tGcombined.outEdgeWeights[u].push_back(ew);\n\t\t\t} else {\n\t\t\t\tGcombined.outEdgeWeights[u][vi] += ew;\n\t\t\t}\n\n\t\t};\n\n\t\tDEBUG(\"combining graphs\");\n\t\tGcombined.balancedParallelForNodes([&](node u) {\n\t\t\tfor (index l = 0; l < nThreads; ++l) {\n\t\t\t\tlocalGraphs.at(l).forEdgesOf(u, [&](node u, node v, edgeweight w) {\n\t\t\t\t\tTRACE(\"increasing weight of (\", u, v, \") to\", w);\n\t\t\t\t\tthreadSafeIncreaseWeight(u, v, w);\n\t\t\t\t});\n\t\t\t}\n\t\t});\n\n\n\t\t// ensure consistency of data structure\n\t\tDEBUG(\"numEdges: \", numEdges);\n\t\tcount twiceM = std::accumulate(numEdges.begin(), numEdges.end(), 0);\n\t\tassert (twiceM % 2 == 0);\n\t\tGcombined.m = (twiceM / 2);\n\n\t\tassert (Gcombined.checkConsistency());\n\n\t\t// stop both timers before printing\n\t\ttimer2.stop();\n\t\tINFO(\"combining coarse graphs took \", timer2.elapsedTag());\n\t} else {\n\t\tstd::vector< std::vector<node> > nodesPerSuperNode(nextNodeId);\n\t\tG.forNodes([&](node v) {\n\t\t\tnode sv = nodeToSuperNode[v];\n\t\t\tnodesPerSuperNode[sv].push_back(v);\n\t\t});\n\n\t\t// iterate over edges of G and create edges in coarse graph or update edge and node weights in 
Gcon\n\t\tDEBUG(\"create edges in coarse graphs\");\n\t\tGraphBuilder b(nextNodeId, true, false);\n\t\t#pragma omp parallel for schedule(guided)\n\t\tfor (node su = 0; su < nextNodeId; su++) {\n\t\t\tstd::map<index, edgeweight> outEdges;\n\t\t\tfor (node u : nodesPerSuperNode[su]) {\n\t\t\t\tG.forNeighborsOf(u, [&](node v, edgeweight ew) {\n\t\t\t\t\tnode sv = nodeToSuperNode[v];\n\t\t\t\t\tif (su != sv || u >= v) { // count edges inside uv only once (we iterate over them twice)\n\t\t\t\t\t\toutEdges[sv] += ew;\n\t\t\t\t\t}\n\t\t\t\t});\n\t\t\t}\n\t\t\tfor (auto it : outEdges) {\n\t\t\t\tb.addHalfEdge(su, it.first, it.second);\n\t\t\t}\n\t\t}\n\n\t\tGcombined = b.toGraph(false);\n\t}\n\n\ttimer.stop();\n\tINFO(\"parallel coarsening took \", timer.elapsedTag());\n\tGcoarsened = std::move(Gcombined);\n\tnodeMapping = nodeToSuperNode.getVector();\n\thasRun = true;\n}\n\n} /* namespace NetworKit */\n" }, { "alpha_fraction": 0.695049524307251, "alphanum_fraction": 0.7069306969642639, "avg_line_length": 19.200000762939453, "blob_id": "9ecfb6cec1126a3083c69568c49851955824b650", "content_id": "a2afc0396d4962c32d2dda56b0e9f8cf43341501", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 505, "license_type": "permissive", "max_line_length": 61, "num_lines": 25, "path": "/networkit/cpp/algebraic/test/DenseMatrixGTest.h", "repo_name": "networkitproject/networkit-mirror", "src_encoding": "UTF-8", "text": "/*\n * DenseMatrixGTest.h\n *\n * Created on: Nov 25, 2015\n * Author: Michael Wegner ([email protected])\n */\n\n#ifndef NETWORKIT_CPP_ALGEBRAIC_TEST_DENSEMATRIXGTEST_H_\n#define NETWORKIT_CPP_ALGEBRAIC_TEST_DENSEMATRIXGTEST_H_\n\n#include \"gtest/gtest.h\"\n\n#include \"../DenseMatrix.h\"\n#include \"../Vector.h\"\n\nnamespace NetworKit {\n\nclass DenseMatrixGTest : public testing::Test {\n\tDenseMatrixGTest();\n\tvirtual ~DenseMatrixGTest();\n};\n\n} /* namespace NetworKit */\n\n#endif /* NETWORKIT_CPP_ALGEBRAIC_TEST_DENSEMATRIXGTEST_H_ */\n" }, { "alpha_fraction": 0.7361069917678833, "alphanum_fraction": 0.7396730780601501, "avg_line_length": 33.336734771728516, "blob_id": "974b00682ae3ea677a16e77413e34785495d4e5c", "content_id": "61f21afd61d7eb41eee742c98939c45de0990fec", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 3365, "license_type": "permissive", "max_line_length": 177, "num_lines": 98, "path": "/networkit/cpp/numerics/LAMG/SolverLamg.h", "repo_name": "networkitproject/networkit-mirror", "src_encoding": "UTF-8", "text": "/*\n * SolverLamg.h\n *\n * Created on: 12.01.2015\n * Author: Michael\n */\n\n#ifndef SOLVERLAMG_H_\n#define SOLVERLAMG_H_\n\n#include \"LevelHierarchy.h\"\n#include \"../Smoother.h\"\n#include \"../../algebraic/DenseMatrix.h\"\n\nnamespace NetworKit {\n\n/**\n * Status parameters of the solver.\n */\nstruct LAMGSolverStatus {\n\t// in\n\tcount maxIters = std::numeric_limits<count>::max(); // maximum number of iterations\n\tcount maxConvergenceTime = std::numeric_limits<count>::max(); // maximum time in milliseconds spent to solve the system\n\tdouble desiredResidualReduction = 1e-8; // desired reduction of the initial residual (finalResidual <= desiredResReduction * initialResidual)\n\tcount numPreSmoothIters = 1; // number of pre smoothing iterations\n\tcount numPostSmoothIters = 2; // number of post smoothing iterations\n\n\t// out\n\tcount numIters; // number of iterations needed during solve phase\n\tdouble residual; // absolute final 
residual\n\tbool converged; // flag indicating convergence status\n\tstd::vector<double> residualHistory; // history of absolute residuals\n};\n\n/**\n * @ingroup numerics\n * Implements the solve phase of LAMG (Lean Algebraic Multigrid by Livne et al.).\n */\nclass SolverLamg {\nprivate:\n\tLevelHierarchy &hierarchy;\n\tconst Smoother &smoother;\n#ifndef NDEBUG\n\tstatic count minResTime;\n\tstatic count interpolationTime;\n\tstatic count restrictionTime;\n\tstatic count coarsestSolve;\n#endif\n\n\t// data structures for iterate recombination\n\tstd::vector<std::vector<Vector>> history;\n\tstd::vector<std::vector<Vector>> rHistory;\n\tstd::vector<index> latestIterate;\n\tstd::vector<count> numActiveIterates;\n\n\t// bStages for Elimination Levels\n\tstd::vector<std::vector<Vector>> bStages;\n\n\tvoid solveCycle(Vector &x, const Vector &b, int finest, LAMGSolverStatus &status);\n\tvoid cycle(Vector &x, const Vector &b, int finest, int coarsest, std::vector<count> &numVisits, std::vector<Vector> &X, std::vector<Vector> &B, const LAMGSolverStatus &status);\n\tvoid multigridCycle(index level, Vector &xf, const Vector &bf);\n\tvoid saveIterate(index level, const Vector &x, const Vector &r);\n\tvoid clearHistory(index level);\n\tvoid minRes(index level, Vector &x, const Vector &r);\n\npublic:\n\t/**\n\t * Constructs a new solver instance for the specified @a hierarchy. The @a smoother will be used for relaxing and\n\t * solving the coarser solutions.\n\t * @param hierarchy Reference to the LevelHierarchy constructed by MultiLevelSetup.\n\t * @param smoother Reference to a smoother.\n\t */\n\tSolverLamg(LevelHierarchy &hierarchy, const Smoother &smoother);\n\n\tSolverLamg (const SolverLamg &other) = default;\n\n\tSolverLamg (SolverLamg &&other) = default;\n\n\tvirtual ~SolverLamg() = default;\n\n\tSolverLamg& operator=(SolverLamg &&other) = default;\n\n\tSolverLamg& operator=(const SolverLamg &other) = default;\n\n\t/**\n\t * Solves the system A*x = b for the given initial @a x and right-hand side @a b. More parameters can be specified\n\t * in @a status and additional output is also stored in @a status. 
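A typical call sequence is sketched below (an illustrative addition, not part of the original documentation; it assumes a LevelHierarchy built beforehand, e.g. by MultiLevelSetup, a concrete Smoother implementation, and vectors x and b declared elsewhere):\n\t *\n\t *   SolverLamg solver(hierarchy, smoother); // hierarchy and smoother as described above\n\t *   LAMGSolverStatus status;\n\t *   status.desiredResidualReduction = 1e-6; // optional: adjust the convergence target\n\t *   solver.solve(x, b, status);\n\t *   // status.converged and status.residual now describe the outcome\n\t *\n\t * 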
After the solver finished, the approximate\n\t * solution is stored in @a x.\n\t * @param x[out] Reference to the initial guess to the solution and the approximation after the solver finished.\n\t * @param b The right-hand side vector.\n\t * @param status Reference to an LAMGSolverStatus.\n\t */\n\tvoid solve(Vector &x, const Vector &b, LAMGSolverStatus &status);\n};\n\n} /* namespace NetworKit */\n\n#endif /* SOLVERLAMG_H_ */\n" }, { "alpha_fraction": 0.6961602568626404, "alphanum_fraction": 0.7095158696174622, "avg_line_length": 18.322580337524414, "blob_id": "3498c0fbd18beb96a57c0ef98a7be98bd09a09ab", "content_id": "4a2729d1902bf74aeeada45c8ad84e5c3ecea2dc", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 599, "license_type": "permissive", "max_line_length": 68, "num_lines": 31, "path": "/networkit/cpp/algebraic/NormalizedLaplacianMatrix.h", "repo_name": "networkitproject/networkit-mirror", "src_encoding": "UTF-8", "text": "/*\n * NormalizedLaplacianMatrix.h\n *\n * Created on: 20.03.2014\n * Author: Michael Wegner ([email protected])\n */\n\n#ifndef NORMALIZEDLAPLACIANMATRIX_H_\n#define NORMALIZEDLAPLACIANMATRIX_H_\n\n#include \"../graph/Graph.h\"\n#include <cmath>\n#include \"Matrix.h\"\n\nnamespace NetworKit {\n\n/**\n * @ingroup algebraic\n * Normalized laplacian matrix of a Graph.\n */\nclass NormalizedLaplacianMatrix : public Matrix {\npublic:\n\t/**\n\t * Constructs the NormalizedLaplacianMatrix for the given @a graph.\n\t */\n\tNormalizedLaplacianMatrix(const Graph &graph);\n};\n\n} /* namespace NetworKit */\n\n#endif /* NORMALIZEDLAPLACIANMATRIX_H_ */\n" }, { "alpha_fraction": 0.6739130616188049, "alphanum_fraction": 0.695652186870575, "avg_line_length": 13.720000267028809, "blob_id": "f9606466b0d1e3383a6b5e94978542dd0bd3db4b", "content_id": "11c90255344a6d5262c665f8f78884e16028c4c4", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 368, "license_type": "permissive", "max_line_length": 52, "num_lines": 25, "path": "/networkit/cpp/sparsification/test/SimmelianBackboneGTest.h", "repo_name": "networkitproject/networkit-mirror", "src_encoding": "UTF-8", "text": "/*\n * SimmelianBackboneGTest.h\n *\n * Created on: 31.05.2014\n * Author: Gerd Lindner\n */\n\n#ifndef NOGTEST\n\n#ifndef SIMMELIANBACKBONETEST_H_\n#define SIMMELIANBACKBONETEST_H_\n\n#include <gtest/gtest.h>\n\nnamespace NetworKit {\n\nclass SimmelianBackboneGTest: public testing::Test {\n\n};\n\n\n} /* namespace NetworKit */\n#endif /* SIMMELIANBACKBONETEST_H_ */\n\n#endif /*NOGTEST */\n" }, { "alpha_fraction": 0.6767441630363464, "alphanum_fraction": 0.6860465407371521, "avg_line_length": 19.4761905670166, "blob_id": "fa619da1da27de382aa3c5d0ec036af094c189d2", "content_id": "499a5b463736202b682ad4626c0375ec1c7579fd", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 860, "license_type": "permissive", "max_line_length": 119, "num_lines": 42, "path": "/networkit/cpp/numerics/LAMG/Level/EliminationStage.h", "repo_name": "networkitproject/networkit-mirror", "src_encoding": "UTF-8", "text": "/*\n * EliminationStage.h\n *\n * Created on: 09.01.2015\n * Author: Michael\n */\n\n#ifndef ELIMINATIONSTAGE_H_\n#define ELIMINATIONSTAGE_H_\n\n#include \"../../../algebraic/CSRMatrix.h\"\n#include \"../../../algebraic/Vector.h\"\n\nnamespace NetworKit {\n\n/**\n * @ingroup numerics\n */\nclass EliminationStage {\nprivate:\n\tCSRMatrix P; // interpolation 
matrix\n\tCSRMatrix R;\n\tVector q; // coarse result correction vector\n\tstd::vector<index> fSet;\n\tstd::vector<index> cSet;\n\npublic:\n\tEliminationStage(const CSRMatrix &P, const Vector &q, const std::vector<index> &fSet, const std::vector<index> &cSet);\n\n\tconst CSRMatrix& getP() const;\n\tconst CSRMatrix& getR() const;\n \tconst Vector& getQ() const;\n\tconst std::vector<index>& getFSet() const;\n\tconst std::vector<index>& getCSet() const;\n\n\tcount getN() const;\n\n};\n\n} /* namespace NetworKit */\n\n#endif /* ELIMINATIONSTAGE_H_ */\n" }, { "alpha_fraction": 0.721084475517273, "alphanum_fraction": 0.7263908386230469, "avg_line_length": 28.70689582824707, "blob_id": "33cfca6a07ffc0c182abda30e93cbb48787e7ece", "content_id": "0020efaf9638e87fd59e7a886c6d803d7782e3c8", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 12061, "license_type": "permissive", "max_line_length": 101, "num_lines": 406, "path": "/networkit/test/test_algorithms.py", "repo_name": "networkitproject/networkit-mirror", "src_encoding": "UTF-8", "text": "#! /usr/bin/python\n\nimport unittest\nimport os\n\nfrom networkit import *\n\n\nclass Test_SelfLoops(unittest.TestCase):\n\n\tdef checkCovers(self, c1, c2):\n\t\tif not c1.numberOfElements() == c2.numberOfElements(): return False\n\t\tif not c1.numberOfSubsets() == c2. numberOfSubsets(): return False\n\t\tfor i in range(0,c1.numberOfElements()):\n\t\t\tif not c1.subsetsOf(i) == c2.subsetsOf(i): return False\n\t\treturn True\n\n\tdef setUp(self):\n\t\t# toggle the comment/uncomment to test on small or large test cases\n\t\t#self.L = readGraph(\"PGPgiantcompo.graph\", Format.METIS) #without self-loops\n\t\t#self.LL = readGraph(\"PGPConnectedCompoLoops.gml\", Format.GML) #with self-loops sprinkled in\n\t\tself.L = readGraph(\"input/looptest1.gml\", Format.GML) #without self-loops\n\t\tself.LL = readGraph(\"input/looptest2.gml\", Format.GML) #with self-loops sprinkled in\n\n\tdef test_centrality_Betweenness(self):\n\t\tCL = centrality.Betweenness(self.L)\n\t\tCL.run()\n\t\tCLL = centrality.Betweenness(self.LL)\n\t\tCLL.run()\n\t\tself.assertEqual(CL.ranking(), CLL.ranking())\n\n\tdef test_centrality_ApproxBetweenness(self):\n\t\tCL = centrality.ApproxBetweenness(self.L, epsilon=0.01, delta=0.1)\n\t\tCL.run()\n\t\tCLL = centrality.ApproxBetweenness(self.LL, epsilon=0.01, delta=0.1)\n\t\tCLL.run()\n\t\t#test if lists have the same length\n\t\tself.assertEqual(len(CL.ranking()),len(CLL.ranking()))\n\t\tfor i in range(len(CL.ranking())):\n\t\t\tself.assertAlmostEqual(CL.ranking()[i][1], CLL.ranking()[i][1], delta=0.2*CL.ranking()[i][1])\n\n\n\tdef test_centrality_Closeness(self):\n\t\tCL = centrality.Closeness(self.L)\n\t\tCL.run()\n\t\tCLL = centrality.Closeness(self.LL)\n\t\tCLL.run()\n\t\tself.assertEqual(CL.ranking(), CLL.ranking())\n\n\n\tdef test_centrality_CoreDecomposition(self):\n\t\tCL = centrality.CoreDecomposition(self.L)\n\t\tCL.run()\n\t\ttry:\n\t\t\tCLL = centrality.CoreDecomposition(self.LL)\n\t\texcept RuntimeError:\n\t\t\timport copy\n\t\t\ttmp = copy.deepcopy(self.LL)\n\t\t\ttmp.removeSelfLoops()\n\t\t\tCLL = centrality.CoreDecomposition(tmp)\n\t\t\tCLL.run()\n\t\t\tself.assertTrue(self.checkCovers(CL.getCover(),CLL.getCover()))\n\n\n\tdef test_centrality_EigenvectorCentrality(self):\n\t\tCL = centrality.EigenvectorCentrality(self.L)\n\t\tCL.run()\n\t\tCLL = centrality.EigenvectorCentrality(self.LL)\n\t\tCLL.run()\n\t\t#test if lists have the same 
length\n\t\tself.assertEqual(len(CL.ranking()),len(CLL.ranking()))\n\n\n\tdef test_centrality_KPathCentrality(self):\n\t\tCL = centrality.KPathCentrality(self.L)\n\t\tCL.run()\n\t\tCLL = centrality.KPathCentrality(self.LL)\n\t\tCLL.run()\n\t\t#test if lists have the same length\n\t\tself.assertEqual(len(CL.ranking()),len(CLL.ranking()))\n\n\n\tdef test_centrality_KatzCentrality(self):\n\t\tCL = centrality.KatzCentrality(self.L)\n\t\tCL.run()\n\t\tCLL = centrality.KatzCentrality(self.LL)\n\t\tCLL.run()\n\t\t#test if lists have the same length\n\t\tself.assertEqual(len(CL.ranking()),len(CLL.ranking()))\n\n\n\tdef test_centrality_PageRank(self):\n\t\tCL = centrality.PageRank(self.L)\n\t\tCL.run()\n\t\tCLL = centrality.PageRank(self.LL)\n\t\tCLL.run()\n\t\t#test if lists have the same length\n\t\tself.assertEqual(len(CL.ranking()),len(CLL.ranking()))\n\n\n\tdef test_centrality_rankPerNode(self):\n\t\tCL = centrality.PageRank(self.L)\n\t\tCL.run()\n\t\tCLL = centrality.PageRank(self.LL)\n\t\tCLL.run()\n\t\t#test if list of pairs and list of ranks have the same length\n\t\tself.assertEqual(len(CL.ranking()),len(centrality.rankPerNode(CL.ranking())))\n\t\tself.assertEqual(len(CLL.ranking()),len(centrality.rankPerNode(CLL.ranking())))\n\n\n\tdef test_centrality_SciPyPageRank(self):\n\t\tCL = centrality.SciPyPageRank(self.L)\n\t\tCL.run()\n\t\tCLL = centrality.SciPyPageRank(self.LL)\n\t\tCLL.run()\n\t\t#test if lists have the same length\n\t\tself.assertEqual(len(CL.ranking()),len(CLL.ranking()))\n\n\n\tdef test_centrality_SciPyEVZ(self):\n\t\tCL = centrality.SciPyEVZ(self.L)\n\t\tCL.run()\n\t\tCLL = centrality.SciPyEVZ(self.LL)\n\t\tCLL.run()\n\t\t#test if lists have the same length\n\t\tself.assertEqual(len(CL.ranking()),len(CLL.ranking()))\n\n\tdef test_centrality_relativeRankErrors(self):\n\t\tCL = centrality.Betweenness(self.L)\n\t\tCL.run()\n\t\tCLL = centrality.Betweenness(self.LL)\n\t\tCLL.run()\n\t\tself.assertEqual(len(CL.ranking()), len(centrality.relativeRankErrors(CL.ranking(),CLL.ranking())))\n\n\n\tdef test_community_PLM(self):\n\t\tPLML = community.PLM(self.L)\n\t\tPLMLL = community.PLM(self.LL)\n\t\tPLML.run()\n\t\tPLMLL.run()\n\t\tPLMLP = PLML.getPartition()\n\t\tPLMLLP = PLMLL.getPartition()\n\t\tself.assertIsNot(PLMLP.getSubsetIds(), None)\n\t\tself.assertIsNot(PLMLLP.getSubsetIds(), None)\n\t\t# test if partitions add up to original set\n\t\treconstructedSet = []\n\t\tfor i in PLMLP.getSubsetIds():\n\t\t\tfor j in PLMLP.getMembers(i):\n\t\t\t\treconstructedSet.append(j)\n\t\tself.assertEqual(set(self.L.nodes()), set(reconstructedSet))\n\t\treconstructedSet = []\n\t\tfor i in PLMLLP.getSubsetIds():\n\t\t\tfor j in PLMLLP.getMembers(i):\n\t\t\t\treconstructedSet.append(j)\n\t\tself.assertEqual(set(self.LL.nodes()), set(reconstructedSet))\n\n\tdef test_community_PLP(self):\n\t\tPLPL = community.PLP(self.L)\n\t\tPLPLL = community.PLP(self.LL)\n\t\tPLPL.run()\n\t\tPLPLL.run()\n\t\tPLPLP = PLPL.getPartition()\n\t\tPLPLLP = PLPLL.getPartition()\n\t\tself.assertIsNot(PLPLP.getSubsetIds(), None)\n\t\tself.assertIsNot(PLPLLP.getSubsetIds(), None)\n\t\t# test if partitions add up to original set\n\t\treconstructedSet = []\n\t\tfor i in PLPLP.getSubsetIds():\n\t\t\tfor j in PLPLP.getMembers(i):\n\t\t\t\treconstructedSet.append(j)\n\t\tself.assertEqual(set(self.L.nodes()), set(reconstructedSet))\n\t\treconstructedSet = []\n\t\tfor i in PLPLLP.getSubsetIds():\n\t\t\tfor j in PLPLLP.getMembers(i):\n\t\t\t\treconstructedSet.append(j)\n\t\tself.assertEqual(set(self.LL.nodes()), 
set(reconstructedSet))\n\n\n\tdef test_community_CutClustering(self):\n\t\tCL = community.CutClustering(self.L, 0.2)\n\t\tCLL = community.CutClustering(self.LL, 0.2)\n\t\tCL.run()\n\t\tCLL.run()\n\t\tCLP = CL.getPartition()\n\t\tCLLP = CLL.getPartition()\n\t\tself.assertIsNot(CLP.getSubsetIds(), None)\n\t\tself.assertIsNot(CLLP.getSubsetIds(), None)\n\t\t# test if partitions add up to original set\n\t\treconstructedSet = []\n\t\tfor i in CLP.getSubsetIds():\n\t\t\tfor j in CLP.getMembers(i):\n\t\t\t\treconstructedSet.append(j)\n\t\tself.assertEqual(set(self.L.nodes()), set(reconstructedSet))\n\t\treconstructedSet = []\n\t\tfor i in CLLP.getSubsetIds():\n\t\t\tfor j in CLLP.getMembers(i):\n\t\t\t\treconstructedSet.append(j)\n\t\tself.assertEqual(set(self.LL.nodes()), set(reconstructedSet))\n\n\n\tdef test_community_GraphClusteringTools(self):\n\t\tPLMLL = community.PLM(self.LL)\n\t\tPLMLL.run()\n\t\tPLMLLP = PLMLL.getPartition()\n\t\tPLPLL = community.PLP(self.LL)\n\t\tPLPLL.run()\n\t\tPLPLLP = PLPLL.getPartition()\n\t\tGCT = community.GraphClusteringTools()\n\t\tself.assertIsInstance(GCT.equalClustering(PLMLLP,PLPLLP, self.LL), bool)\n\t\tself.assertIsInstance(GCT.getImbalance(PLMLLP), float)\n\t\tself.assertIsInstance(GCT.isOneClustering(self.LL, PLPLLP), bool)\n\t\tself.assertIsInstance(GCT.isProperClustering(self.LL, PLMLLP), bool)\n\t\tself.assertIsInstance(GCT.isSingletonClustering(self.LL, PLPLLP), bool)\n\n\n\tdef test_community_GraphStructuralRandMeasure(self):\n\t\tPLMLL = community.PLM(self.LL)\n\t\tPLMLL.run()\n\t\tPLMLLP = PLMLL.getPartition()\n\t\tPLPLL = community.PLP(self.LL)\n\t\tPLPLL.run()\n\t\tPLPLLP = PLPLL.getPartition()\n\t\tGSRM = community.GraphStructuralRandMeasure()\n\t\tself.assertAlmostEqual(GSRM.getDissimilarity(self.LL, PLMLLP, PLPLLP),0.5, delta=0.5 )\n\n\n\tdef test_community_Hubdominance(self):\n\t\tPLMLL = community.PLM(self.LL)\n\t\tPLMLL.run()\n\t\tPLMLLP = PLMLL.getPartition()\n\t\tHD = community.HubDominance()\n\t\tself.assertIsInstance(HD.getQuality(PLMLLP, self.LL),float )\n\n\n\tdef test_community_JaccardMeasure(self):\n\t\tPLMLL = community.PLM(self.LL)\n\t\tPLMLL.run()\n\t\tPLMLLP = PLMLL.getPartition()\n\t\tPLPLL = community.PLP(self.LL)\n\t\tPLPLL.run()\n\t\tPLPLLP = PLPLL.getPartition()\n\t\tJM = community.JaccardMeasure()\n\t\tself.assertIsInstance(JM.getDissimilarity(self.LL, PLMLLP, PLPLLP),float)\n\n\n\tdef test_community_LPDegreeOrdered(self):\n\t\tCL = community.LPDegreeOrdered(self.L)\n\t\tCLL = community.LPDegreeOrdered(self.LL)\n\t\tCL.run()\n\t\tCLL.run()\n\t\tCLP = CL.getPartition()\n\t\tCLLP = CLL.getPartition()\n\t\tself.assertIsNot(CLP.getSubsetIds(), None)\n\t\tself.assertIsNot(CLLP.getSubsetIds(), None)\n\t\t# test if partitions add up to original set\n\t\treconstructedSet = []\n\t\tfor i in CLP.getSubsetIds():\n\t\t\tfor j in CLP.getMembers(i):\n\t\t\t\treconstructedSet.append(j)\n\t\tself.assertEqual(set(self.L.nodes()), set(reconstructedSet))\n\t\treconstructedSet = []\n\t\tfor i in CLLP.getSubsetIds():\n\t\t\tfor j in CLLP.getMembers(i):\n\t\t\t\treconstructedSet.append(j)\n\t\tself.assertEqual(set(self.LL.nodes()), set(reconstructedSet))\n\n\tdef test_community_Modularity(self):\n\t\tPLPLL = community.PLP(self.LL)\n\t\tPLPLL.run()\n\t\tPLPLLP = PLPLL.getPartition()\n\t\tMod = community.Modularity()\n\t\tself.assertAlmostEqual(Mod.getQuality(PLPLLP, self.LL),0.25, delta=0.75)\n\n\n\tdef test_community_NMIDistance(self):\n\t\tPLMLL = community.PLM(self.LL)\n\t\tPLMLL.run()\n\t\tPLMLLP = PLMLL.getPartition()\n\t\tPLPLL = 
community.PLP(self.LL)\n\t\tPLPLL.run()\n\t\tPLPLLP = PLPLL.getPartition()\n\t\tNMI = community.NMIDistance()\n\t\tself.assertIsInstance(NMI.getDissimilarity(self.LL, PLMLLP, PLPLLP),float)\n\n\n\tdef test_community_NodeStructuralRandMeasure(self):\n\t\tPLMLL = community.PLM(self.LL)\n\t\tPLMLL.run()\n\t\tPLMLLP = PLMLL.getPartition()\n\t\tPLPLL = community.PLP(self.LL)\n\t\tPLPLL.run()\n\t\tPLPLLP = PLPLL.getPartition()\n\t\tNSRM = community.NodeStructuralRandMeasure()\n\t\tself.assertAlmostEqual(NSRM.getDissimilarity(self.LL, PLMLLP, PLPLLP),0.5, delta=0.5 )\n\n\n\tdef test_community_communityGraph(self):\n\t\tPLMLL = community.PLM(self.LL)\n\t\tPLMLL.run()\n\t\tPLMLLP = PLMLL.getPartition()\n\t\tCG = community.communityGraph(self.LL, PLMLLP)\n\t\tself.assertIsInstance(len(CG.nodes()), int)\n\n\n\tdef test_community_evaluateCommunityDetection(self):\n\t\tPLMLL = community.PLM(self.LL)\n\t\tcommunity.evalCommunityDetection(PLMLL, self.LL)\n\n\n\tdef test_community_kCoreCommunityDetection(self):\n\t\twith self.assertRaises(RuntimeError) as cm:\n\t\t\tkCCD = community.kCoreCommunityDetection(self.LL, 1, inspect=False)\n\n\n\tdef test_flow_EdmondsKarp(self):\n\t\tself.L.indexEdges()\n\t\tself.LL.indexEdges()\n\t\tr1 = self.L.randomNode()\n\t\tr2 = self.L.randomNode()\n\t\twhile r1 is r2:\n\t\t\tr2 = self.L.randomNode()\n\t\tEKL = flow.EdmondsKarp(self.L, r1, r2)\n\t\tEKLL = flow.EdmondsKarp(self.LL, r1, r2)\n\t\tEKL.run()\n\t\tEKLL.run()\n\n\n\tdef test_clique_MaxClique(self):\n\t\tclique.MaxClique(self.LL).run()\n\n\n\tdef test_globals_ClusteringCoefficient(self):\n\t\tCL = globals.ClusteringCoefficient()\n\t\tCL.exactGlobal(self.L)\n\t\tCL.exactGlobal(self.LL)\n\t\tCL.approxGlobal(self.L, 5)\n\t\tCL.approxGlobal(self.LL, 5)\n\t\tCL.approxAvgLocal(self.L, 5)\n\t\tCL.approxAvgLocal(self.LL, 5)\n\t\tCL.avgLocal(self.L)\n\t\twith self.assertRaises(RuntimeError) as cm:\n\t\t\tCL.avgLocal(self.LL)\n\t\tCL.sequentialAvgLocal(self.L)\n\t\tCL.sequentialAvgLocal(self.LL)\n\n\n\tdef test_components_ConnectedComponents(self):\n\t\tCC = components.ConnectedComponents(self.LL)\n\t\tCC.run()\n\t\tCC.componentOfNode(1)\n\t\tCC.getComponentSizes()\n\t\tCC.getPartition()\n\t\tCC.numberOfComponents()\n\n\n\tdef test_distance_Diameter(self):\n\t\tD = distance.Diameter(self.LL, distance.DiameterAlgo.EstimatedRange, error = 0.1)\n\t\tD.run()\n\t\tD = distance.Diameter(self.LL, distance.DiameterAlgo.EstimatedSamples, nSamples = 5)\n\t\tD.run()\n\t\tD = distance.Diameter(self.LL, distance.DiameterAlgo.Exact)\n\t\tD.run()\n\n\n\tdef test_distance_Eccentricity(self):\n\t\tE = distance.Eccentricity()\n\t\tE.getValue(self.LL, 0)\n\n\n\tdef test_distance_EffectiveDiameter(self):\n\t\talgo = distance.EffectiveDiameter(self.L)\n\t\talgo.run()\n\t\talgo = distance.EffectiveDiameter(self.LL)\n\t\talgo.run()\n\n\n\tdef test_distance_ApproxEffectiveDiameter(self):\n\t\talgo = distance.ApproxEffectiveDiameter(self.L)\n\t\talgo.run()\n\t\talgo = distance.ApproxEffectiveDiameter(self.LL)\n\t\talgo.run()\n\n\n\tdef test_distance_ApproxHopPlot(self):\n\t\talgo = distance.ApproxHopPlot(self.L)\n\t\talgo.run()\n\t\talgo = distance.ApproxHopPlot(self.LL)\n\t\talgo.run()\n\n\n\tdef test_distance_NeighborhoodFunction(self):\n\t\talgo = distance.NeighborhoodFunction(self.L)\n\t\talgo.run()\n\t\talgo = distance.NeighborhoodFunction(self.LL)\n\t\talgo.run()\n\n\n\tdef test_distance_ApproxNeighborhoodFunction(self):\n\t\talgo = distance.ApproxNeighborhoodFunction(self.L)\n\t\talgo.run()\n\t\talgo = 
distance.ApproxNeighborhoodFunction(self.LL)\n\t\talgo.run()\n\n\nif __name__ == \"__main__\":\n\tunittest.main()\n" }, { "alpha_fraction": 0.7070484757423401, "alphanum_fraction": 0.7151248455047607, "avg_line_length": 20.619047164916992, "blob_id": "741bfac894852745b31323af6c5a1c8826deabd7", "content_id": "752f6271578abce4d10895fbe53ff51ca1b60909", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1362, "license_type": "permissive", "max_line_length": 158, "num_lines": 63, "path": "/networkit/cpp/viz/FruchtermanReingold.h", "repo_name": "networkitproject/networkit-mirror", "src_encoding": "UTF-8", "text": "/*\n * ForceDirected.h\n *\n * Created on: Apr 11, 2013\n * Author: Henning\n */\n\n#ifndef FORCEDIRECTED_H_\n#define FORCEDIRECTED_H_\n\n#include \"Layouter.h\"\n#include \"Point.h\"\n#include \"PostscriptWriter.h\"\n\n#include <vector>\n#include <cmath>\n\n\nnamespace NetworKit {\n\nconst count MAX_ITER = 300;\nconst double EPS = 0.1;\n\n/**\n * @ingroup viz\n * Fruchterman-Reingold graph drawing algorithm. We mostly follow\n * the description in Stephen G. Kobourov: Spring Embedders and Force\n * Directed Graph Drawing Algorithms.\n */\n\n// TODO: refactor to inherit from LayoutAlgorithm base class\nclass FruchtermanReingold: public NetworKit::Layouter {\nprivate:\n\tstatic const float INITIAL_STEP_LENGTH;\n\tstatic const float OPT_PAIR_SQR_DIST_SCALE;\n\n\tcount maxIter;\n\tfloat prec;\n\tfloat step;\n\npublic:\n\n\t/**\n\t * Constructor. DO NOT use for creating objects, only needed for\n\t * Python shell.\n\t */\n\tFruchtermanReingold() {}\n\n\t/**\n\t * Constructor.\n\t * @param[in] bottomLeft Coordinate of point in bottom/left corner\n\t * @param[in] topRight Coordinate of point in top/right corner\n\t */\n\tFruchtermanReingold(Point<float> bottomLeft, Point<float> topRight, bool useGivenCoordinates = false, count maxIterations = MAX_ITER, float precision = EPS);\n\n\t/**\n\t * Assigns coordinates to vertices in graph @a g\n\t */\n\tvirtual void draw(Graph& g);\n};\n\n} /* namespace NetworKit */\n#endif /* FORCEDIRECTED_H_ */\n" }, { "alpha_fraction": 0.6604985594749451, "alphanum_fraction": 0.6658956408500671, "avg_line_length": 28.702289581298828, "blob_id": "9e60e19cb94ce1adaec116bbc6d3d1e3dbdf3aae", "content_id": "e36f24cb2b543921841f11de2f3cb2e1f9e72962", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 3891, "license_type": "permissive", "max_line_length": 283, "num_lines": 131, "path": "/networkit/cpp/centrality/ApproxBetweenness.cpp", "repo_name": "networkitproject/networkit-mirror", "src_encoding": "UTF-8", "text": "/*\n * ApproxBetweenness.cpp\n *\n * Created on: 09.04.2014\n * Author: cls\n */\n\n#include \"ApproxBetweenness.h\"\n#include \"../auxiliary/Random.h\"\n#include \"../distance/Diameter.h\"\n#include \"../graph/Sampling.h\"\n#include \"../graph/Dijkstra.h\"\n#include \"../graph/BFS.h\"\n#include \"../graph/SSSP.h\"\n#include \"../auxiliary/Log.h\"\n#include \"../auxiliary/SignalHandling.h\"\n\n#include <math.h>\n#include <algorithm>\n#include <memory>\n#include <omp.h>\n\nnamespace NetworKit {\n\nApproxBetweenness::ApproxBetweenness(const Graph& G, const double epsilon, const double delta, const count diameterSamples, const double universalConstant) : Centrality(G, true), epsilon(epsilon), delta(delta), diameterSamples(diameterSamples), universalConstant(universalConstant) {\n\n}\n\n\nvoid ApproxBetweenness::run() {\n\tAux::SignalHandler 
handler;\n\tscoreData.clear();\n\tscoreData.resize(G.upperNodeIdBound());\n\n\tedgeweight vd = 0;\n\tif (diameterSamples == 0) {\n\t\tINFO(\"estimating vertex diameter pedantically\");\n\t\tDiameter diam(G, DiameterAlgo::estimatedPedantic);\n\t\tdiam.run();\n\t\tvd = diam.getDiameter().first;\n\t} else {\n\t\t/**\n\t\t* This is an optimization which deviates from the original algorithm.\n\t\t* Instead of getting an estimate for each of possibly thousands of connected component and taking the maximum,\n\t\t* we sample the graph and take the maximum diameter found. This has a high chance of hitting the component with the maximum vertex diameter.\n\t\t*/\n\t\tINFO(\"estimating vertex diameter roughly\");\n\t\tDiameter diam(G, DiameterAlgo::estimatedSamples, -1.f, diameterSamples);\n\t\tdiam.run();\n\t\tvd = diam.getDiameter().first;\n\t}\n\n\tINFO(\"estimated diameter: \", vd);\n\tr = ceil((universalConstant / (epsilon * epsilon)) * (floor(log2(vd - 2)) + 1 - log(delta)));\n\n\tINFO(\"taking \", r, \" path samples\");\n\t// parallelization:\n\tcount maxThreads = omp_get_max_threads();\n\tDEBUG(\"max threads: \", maxThreads);\n\tstd::vector<std::vector<double> > scorePerThread(maxThreads, std::vector<double>(G.upperNodeIdBound()));\n\tDEBUG(\"score per thread size: \", scorePerThread.size());\n\thandler.assureRunning();\n\t#pragma omp parallel for\n\tfor (count i = 1; i <= r; i++) {\n\t\tcount thread = omp_get_thread_num();\n\t\tDEBUG(\"sample \", i);\n\t\t// if (i >= 1000) throw std::runtime_error(\"too many iterations\");\n\t\t// DEBUG\n\t\t// sample random node pair\n\t\tnode u, v;\n\t\tu = Sampling::randomNode(G);\n\t\tdo {\n\t\t\tv = Sampling::randomNode(G);\n\t\t} while (v == u);\n\n\t\t// runs faster for unweighted graphs\n\t\tstd::unique_ptr<SSSP> sssp;\n\t\tif (G.isWeighted()) {\n\t\t\tsssp.reset(new Dijkstra(G, u, true, false, v));\n\t\t} else {\n\t\t\tsssp.reset(new BFS(G, u, true, false, v));\n\t\t}\n\t\tDEBUG(\"running shortest path algorithm for node \", u);\n\t\tif (!handler.isRunning()) continue;\n\t\tsssp->run();\n\t\tif (!handler.isRunning()) continue;\n\t\tif (sssp->numberOfPaths(v) > 0) { // at least one path between {u, v} exists\n\t\t\tDEBUG(\"updating estimate for path \", u, \" <-> \", v);\n\t\t\t// random path sampling and estimation update\n\t\t\t// node s = v;\n\t\t\tnode t = v;\n\t\t\twhile (t != u) {\n\t\t\t\t// sample z in P_u(t) with probability sigma_uz / sigma_us\n\t\t\t\tstd::vector<std::pair<node, double> > choices;\n\t\t\t\tfor (node z : sssp->getPredecessors(t)) {\n\t\t\t\t\tbigfloat tmp = sssp->numberOfPaths(z) / sssp->numberOfPaths(t);\n\t\t\t\t\tdouble weight;\n\t\t\t\t\ttmp.ToDouble(weight);\n\t\t\t\t\tchoices.emplace_back(z, weight); \t// sigma_uz / sigma_us\n\t\t\t\t}\n\t\t\t\tnode z = Aux::Random::weightedChoice(choices);\n\t\t\t\tassert (z <= G.upperNodeIdBound());\n\t\t\t\tif (z != u) {\n\t\t\t\t\tscorePerThread[thread][z] += 1 / (double) r;\n\t\t\t\t}\n\t\t\t\t// s = t;\n\t\t\t\tt = z;\n\t\t\t}\n\t\t}\n\t}\n\thandler.assureRunning();\n\n\tINFO(\"adding thread-local scores\");\n\t// add up all thread-local values\n\tfor (auto &local : scorePerThread) {\n\t\tG.parallelForNodes([&](node v){\n\t\t\tscoreData[v] += local[v];\n\t\t});\n\t}\n\n\thasRun = true;\n}\n\n\ncount ApproxBetweenness::numberOfSamples() {\n\tINFO(\"Estimated number of samples\", r);\n\treturn r;\n}\n\n\n} /* namespace NetworKit */\n" }, { "alpha_fraction": 0.6397849321365356, "alphanum_fraction": 0.6612903475761414, "avg_line_length": 15.909090995788574, "blob_id": 
"540cf334a3f72b43710bd3a8e70c0d5061154083", "content_id": "554a28ecced760a7f889f2a065541d486804189d", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 372, "license_type": "permissive", "max_line_length": 47, "num_lines": 22, "path": "/networkit/cpp/flow/test/EdmondsKarpGTest.h", "repo_name": "networkitproject/networkit-mirror", "src_encoding": "UTF-8", "text": "/*\n * EdmondsKarpGTest.h\n *\n * Created on: 13.06.2014\n * Author: Michael Wegner ([email protected])\n */\n\n#ifndef EDMONDSKARPGTEST_H_\n#define EDMONDSKARPGTEST_H_\n\n#include \"gtest/gtest.h\"\n#include \"../EdmondsKarp.h\"\n#include \"../../graph/Graph.h\"\n\nnamespace NetworKit {\n\nclass EdmondsKarpGTest : public testing::Test {\n};\n\n} /* namespace NetworKit */\n\n#endif /* EDMONDSKARPGTEST_H_ */\n" }, { "alpha_fraction": 0.6496350169181824, "alphanum_fraction": 0.6642335653305054, "avg_line_length": 15.606060981750488, "blob_id": "62b65314976e492ce507d13526217e23c9f8f55a", "content_id": "7799ed040d03e14fc1d39cf8f79907127b1f2475", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 548, "license_type": "permissive", "max_line_length": 47, "num_lines": 33, "path": "/networkit/cpp/algebraic/IncidenceMatrix.h", "repo_name": "networkitproject/networkit-mirror", "src_encoding": "UTF-8", "text": "/*\n * IncidenceMatrix.h\n *\n * Created on: 21.03.2014\n * Author: Michael Wegner ([email protected])\n */\n\n#ifndef INCIDENCEMATRIX_H_\n#define INCIDENCEMATRIX_H_\n\n#include \"../graph/Graph.h\"\n#include \"Matrix.h\"\n#include \"Vector.h\"\n\nnamespace NetworKit {\n\n/**\n * @ingroup algebraic\n * Incidence matrix of a Graph.\n */\nclass IncidenceMatrix : public Matrix {\n\ttypedef std::pair<node, node> Edge;\n\npublic:\n\t/**\n\t * Constructs the IncidenceMatrix of @a graph.\n\t */\n\tIncidenceMatrix(const Graph &graph);\n};\n\n} /* namespace NetworKit */\n\n#endif /* INCIDENCEMATRIX_H_ */\n" }, { "alpha_fraction": 0.6480769515037537, "alphanum_fraction": 0.6634615659713745, "avg_line_length": 14.757575988769531, "blob_id": "afcaf34f148cc95a7e523b65f89c2fcabb26ec93", "content_id": "4cca801d53934b8ddd623352d814b2e999dffb91", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 520, "license_type": "permissive", "max_line_length": 58, "num_lines": 33, "path": "/networkit/cpp/algebraic/LaplacianMatrix.h", "repo_name": "networkitproject/networkit-mirror", "src_encoding": "UTF-8", "text": "/*\n * LaplacianMatrix.h\n *\n * Created on: 20.03.2014\n * Author: Michael Wegner ([email protected])\n */\n\n#ifndef LAPLACIANMATRIX_H_\n#define LAPLACIANMATRIX_H_\n\n#include \"../graph/Graph.h\"\n#include <cmath>\n#include \"Matrix.h\"\n\nnamespace NetworKit {\n\n/**\n * @ingroup algebraic\n * Laplacian matrix of a Graph.\n */\nclass LaplacianMatrix : public Matrix {\npublic:\n\t/**\n\t * Constructs the LaplacianMatrix for the given @a graph.\n\t */\n\tLaplacianMatrix(const Graph &graph);\n\n};\n\n\n} /* namespace NetworKit */\n\n#endif /* LAPLACIANMATRIX_H_ */\n" }, { "alpha_fraction": 0.6738055944442749, "alphanum_fraction": 0.6902800798416138, "avg_line_length": 21.481481552124023, "blob_id": "2bbeb10ff0f8e57450395a87b8367e61aa938c03", "content_id": "e504f98efb49418761d06881aecb76c04d6bb06a", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 607, "license_type": "permissive", "max_line_length": 143, 
"num_lines": 27, "path": "/networkit/cpp/numerics/LAMG/Level/LevelAggregation.cpp", "repo_name": "networkitproject/networkit-mirror", "src_encoding": "UTF-8", "text": "/*\n * LevelAggregation.cpp\n *\n * Created on: 10.01.2015\n * Author: Michael\n */\n\n#include \"LevelAggregation.h\"\n\nnamespace NetworKit {\n\nLevelAggregation::LevelAggregation(const CSRMatrix &A, const CSRMatrix &P, const CSRMatrix &R) : Level(LevelType::AGGREGATION, A), P(P), R(R) {\n}\n\nvoid LevelAggregation::coarseType(const Vector &xf, Vector &xc) const {\n\txc = Vector(P.numberOfColumns(), 0.0);\n}\n\nvoid LevelAggregation::restrict(const Vector &bf, Vector &bc) const {\n\tbc = R * bf;\n}\n\nvoid LevelAggregation::interpolate(const Vector &xc, Vector &xf) const {\n\txf = P * xc;\n}\n\n} /* namespace NetworKit */\n" }, { "alpha_fraction": 0.7455968856811523, "alphanum_fraction": 0.747553825378418, "avg_line_length": 24.549999237060547, "blob_id": "17bc766a2c41598aaf78dd32a63dca084a99fdd0", "content_id": "9e44b26d940a8c63936cbb57eed5bb479527005d", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 511, "license_type": "permissive", "max_line_length": 80, "num_lines": 20, "path": "/networkit/dynamic.py", "repo_name": "networkitproject/networkit-mirror", "src_encoding": "UTF-8", "text": "# extension imports\nfrom _NetworKit import Graph, GraphEvent, DGSStreamParser, GraphUpdater, APSP\n\n\ndef graphFromStream(stream, weighted, directed):\n\t\"\"\" Convenience function for creating a new graph from a stream of graph events\n\n\tParameters\n\t----------\n\tstream : list of GraphEvent\n\t\tevent stream\n\tweighted : produce a weighted or unweighted graph\n\t\tboolean\n\tdirected : produce a directed or undirected graph\n\t\tboolean\n\t\"\"\"\n\tG = Graph(0, weighted, directed)\n\tgu = GraphUpdater(G)\n\tgu.update(stream)\n\treturn G\n" }, { "alpha_fraction": 0.678002119064331, "alphanum_fraction": 0.6865037083625793, "avg_line_length": 19.45652198791504, "blob_id": "82a54ca3989b096465d2b8f2d82ef183710fde41", "content_id": "0b9e190787c80600b040b2694aa010136b999f2b", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 941, "license_type": "permissive", "max_line_length": 153, "num_lines": 46, "path": "/networkit/cpp/graph/DynDijkstra.h", "repo_name": "networkitproject/networkit-mirror", "src_encoding": "UTF-8", "text": "/*\n * DynDijkstra.h\n *\n * Created on: 21.07.2014\n * Author: ebergamini\n */\n\n#ifndef DYNDIJKSTRA_H_\n#define DYNDIJKSTRA_H_\n\n#include \"DynSSSP.h\"\n\nnamespace NetworKit {\n\n/**\n * @ingroup graph\n * Dynamic Dijkstra.\n */\nclass DynDijkstra : public DynSSSP {\n\npublic:\n\n\t/**\n\t * Creates the object for @a G and source @a s.\n\t *\n\t * @param G The graph.\n\t * @param s The source node.\n\t * @param storePredecessors keep track of the lists of predecessors?\n\t */\n\tDynDijkstra(const Graph& G, node s, bool storePredecessors = true);\n\n\t// TODO the run method could take a vector of distances as an input and in that case just use those distances instead of computing dijkstra from scratch\n\tvoid run() override;\n\n\t/** Updates the distances after an event.*/\n\tvoid update(const std::vector<GraphEvent>& batch) override;\n\nprotected:\n\tenum Color {WHITE, BLACK};\n\tstd::vector<Color> color;\n};\n\n\n} /* namespace NetworKit */\n\n#endif /* DYNDIJKSTRA_H_ */\n" }, { "alpha_fraction": 0.5101892352104187, "alphanum_fraction": 0.5858806371688843, "avg_line_length": 
16.84415626525879, "blob_id": "dfb325e10a74a3ea3aaf1d8e34a4b683416979a8", "content_id": "277c4918b86d3f985bbbe8c422bc9e695d189e6b", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1374, "license_type": "permissive", "max_line_length": 45, "num_lines": 77, "path": "/networkit/cpp/structures/test/UnionFindGTest.cpp", "repo_name": "networkitproject/networkit-mirror", "src_encoding": "UTF-8", "text": "/*\n * PartitionGTest.cpp\n *\n * Created on: 04.12.2013\n * Author: Maximilian Vogel ([email protected])\n */\n\n#include <iostream>\n\n#include \"UnionFindGTest.h\"\n\n#include \"../UnionFind.h\"\n\n#ifndef NOGTEST\n\nnamespace NetworKit {\n\nTEST_F(UnionFindGTest, testAllToSingletons) {\n\tUnionFind p(10);\n\tp.allToSingletons();\n\tfor (int i = 0; i < 10; i++) {\n\t\tEXPECT_TRUE(p.find(i) != p.find((i+1)%10));\n\t}\n}\n\nTEST_F(UnionFindGTest, testMergeSimple) {\n\tUnionFind p(10);\n\tp.merge(3,8);\n\tEXPECT_EQ(p.find(8),p.find(3));\t\n}\n\n\nTEST_F(UnionFindGTest, testMergeSubsets) {\n\tUnionFind p(10);\n\tp.allToSingletons();\n\tp.merge(0,9);\n\tp.merge(1,8);\n\tp.merge(2,7);\n\tp.merge(0,1);\n\tp.merge(1,2);\n\tEXPECT_EQ(p.find(9),p.find(7));\n}\n\nTEST_F(UnionFindGTest, testMergeCircular) {\n\tUnionFind p(16);\n\n\tp.merge(0, 4);\n\tp.merge(1, 5);\n\tp.merge(2, 6);\n\tp.merge(3, 7);\n\tp.merge(8, 12);\n\tp.merge(9, 13);\n\tp.merge(10, 14);\n\tp.merge(11, 15);\n\n\tp.merge(0, 8);\n\tp.merge(1, 9);\n\tp.merge(2, 10);\n\tp.merge(3, 11);\n\tp.merge(4, 12);\n\tp.merge(5, 13);\n\tp.merge(6, 14);\n\tp.merge(7, 15);\n\n\tfor (index i = 0; i < 15; ++i) {\n\t\tEXPECT_EQ(p.find(i), p.find((i+4) % 16));\n\t\tEXPECT_EQ(p.find(i), p.find((i+8) % 16));\n\t\tEXPECT_EQ(p.find(i), p.find((i+12) % 16));\n\t\tEXPECT_NE(p.find(i), p.find((i+1) % 16));\n\t\tEXPECT_NE(p.find(i), p.find((i+2) % 16));\n\t\tEXPECT_NE(p.find(i), p.find((i+3) % 16));\n\t}\n}\n\n} /* namespace NetworKit */\n\n#endif /*NOGTEST */\n" }, { "alpha_fraction": 0.6773743033409119, "alphanum_fraction": 0.6885474920272827, "avg_line_length": 16.047618865966797, "blob_id": "ef0897c5bc28fc66228d477ad179fd7a76127b5c", "content_id": "c7f51536e9b6de8ea2717908e066ac8d615d9dab", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 716, "license_type": "permissive", "max_line_length": 55, "num_lines": 42, "path": "/networkit/cpp/io/SNAPGraphReader.h", "repo_name": "networkitproject/networkit-mirror", "src_encoding": "UTF-8", "text": "/*\n * SNAPGraphReader.h\n *\n * Created on: 19.05.2014\n * Author: Maximilian Vogel\n */\n\n#ifndef SNAPGRAPHREADER_H_\n#define SNAPGRAPHREADER_H_\n\n//#include <unordered_set>\n//#include <vector>\n//#include <fstream>\n\n#include <unordered_map>\n\n#include \"../graph/Graph.h\"\n#include \"../structures/Partition.h\"\n#include \"GraphReader.h\"\n\nnamespace NetworKit {\n\n/**\n * @ingroup io\n */\nclass SNAPGraphReader : public NetworKit::GraphReader {\nprotected:\n\tstd::unordered_map<node,node> mapNodeIds;\n\npublic:\n\n\t/** Default constructor */\n\tSNAPGraphReader() = default;\n\n\tvirtual Graph read(const std::string& path) override;\n\n\tstd::unordered_map<node,node> getNodeIdMap();\n\n};\n\n} /* namespace NetworKit */\n#endif /* SNAPGRAPHREADER_H_ */\n" }, { "alpha_fraction": 0.7423624992370605, "alphanum_fraction": 0.7541460394859314, "avg_line_length": 48.63176727294922, "blob_id": "bc3d69543544d464638b94639dec021f478a1d5b", "content_id": "2c8f0de7fe8d047e52c7843b59335b4c8fe35b71", 
"detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 13748, "license_type": "permissive", "max_line_length": 456, "num_lines": 277, "path": "/Readme.mdown", "repo_name": "networkitproject/networkit-mirror", "src_encoding": "UTF-8", "text": "NetworKit\n=========\n\n[NetworKit][networkit] is an open-source tool suite for high-performance\nnetwork analysis. Its aim is to provide tools for the analysis of large\nnetworks in the size range from thousands to billions of edges. For this\npurpose, it implements efficient graph algorithms, many of them parallel to\nutilize multicore architectures. These are meant to compute standard measures\nof network analysis. NetworKit is focused on scalability and comprehensiveness.\nNetworKit is also a testbed for algorithm engineering and\ncontains novel algorithms from recently published research (see list of publications below).\n\nNetworKit is a Python module. High-performance algorithms are written in C++ and exposed to Python\nvia the Cython toolchain. Python in turn gives us the ability to work interactively and a\nrich environment of tools for data analysis and scientific computing.\nFurthermore, NetworKit's core can be built and used as a native library if needed.\n\n\n## Installation options\n\nWe support three ways to install NetworKit:\n\n- NetworKit Virtual Machine: Download and try NetworKit preinstalled on a virtual machine. This is recommended for users using __Microsoft Windows__.\n- Pip install: Download the NetworKit Python package with pip. This is the easier way to get NetworKit but you can only use NetworKit via Python this way.\n- Build NetworKit from Source: Clone or download the source code of NetworKit and build the C++ and Python modules from source.\n\nMore detailed instructions follow after the requirements section.\nWith NetworKit as a Python extension module, you get access to native high-performance code and can at the same time work interactively in the Python ecosystem. Although the standard Python interpreter works fine, we recommend [IPython] and [jupyterhub](https://github.com/jupyterhub/jupyterhub) as great environments for scientific computing.\n\nOnce you have installed NetworKit, please make sure to check out our [NetworKit UserGuide] for an overview of the features provided in NetworKit.\n\n## Documentation\n\nIn addition to this `Readme`, the `NetworKit_UserGuide` provides an introduction to the NetworKit tools, in the form of an interactive IPython Notebook. The `DevGuide` is meant for developers who would like to contribute. When using NetworKit as a Python module, refer to the docstrings of classes, methods and functions.\n\nC++ sources are also documented in [Doxygen](http://www.stack.nl/~dimitri/doxygen/) format, while the documentation for the Python sources can be generated with [Sphinx](http://sphinx-doc.org/). 
If you have both utilities installed, the documentation can be easily generated by calling the script `make_docs.sh` in `Doc/docs`.\n\nTo convert the documentation markdown files to PDF, install the [pandoc](http://code.google.com/p/pandoc/downloads/list) utility and call the script `docs2pdf.sh`.\n\n## Contact\n\nFor questions regarding NetworKit, subscribe to our [e-mail list][list] (`[email protected]`) and feel free to ask.\n\n## Requirements\n\nYou will need the following software to install NetworKit as a Python\npackage:\n\n- A modern C++ compiler, e.g.: [g++] (>= 4.8) or [clang++]\n  (>= 3.7)\n- OpenMP for parallelism (usually ships with the compiler)\n- Python 3 (>= 3.4 is recommended, 3.3 supported)\n- [Pip]\n- [SCons]: Please note that SCons is only available for Python 2. For\n  installation via pip, we have a script that builds the C++ part of\n  NetworKit, so you can try it without SCons. If you are interested in building different configurations and targets (e.g. unit tests) from source, SCons is necessary.\n- [for developers: Cython (>= 0.21)]\n\n## Installation instructions\n\n### Installing NetworKit via pip\nNetworKit uses some additional external Python packages. While you do not need them to run NetworKit, it is recommended to install them in order to use all the features of NetworKit:\n\n- scipy\n- numpy\n- readline\n- matplotlib\n- networkx\n- tabulate\n\nYou can use the command `pip3 install scipy numpy readline matplotlib networkx tabulate` on your terminal to install all packages at once. From the list of requirements, you need at least a C++ compiler (including OpenMP) and pip (and optionally SCons). NetworKit can be installed via pip with the following command:\n\n    [sudo] pip[3] install [--user] networkit\n\nDuring the installation process, the setup will check if the aforementioned external packages are available and print warnings at the end.\n\n__Note:__ All of the above installation commands may require root privileges depending on your system, so try this accordingly. If you do not have root privileges, add `--user` to your command.\n\n\n### Building NetworKit as a Python Module from source\n\nRun the script `setup.py` with the following options:\n\n\tpython3 setup.py build_ext --inplace [--optimize=V] [-jX]\n\nThe script will call scons to compile NetworKit as a library and then build the extensions in the top folder. By default, NetworKit will be built in optimized mode using all available cores. It is possible to add the options `--optimize=V` and `-jX`, just as in a manual scons call, to specify the optimization level and the number of threads used for compilation.\nThe setup script provides more functionality and can be used with pip as well:\n\n\tpip3 install -e .\n\nwill compile NetworKit, build the extensions and, on top of that, temporarily install NetworKit so that it is available on the whole system. 
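To verify that the editable install works, a quick smoke test in a Python shell is enough (a minimal sketch; it only uses the `Graph` constructor and `toString` method that also appear in the examples below):\n\n\t>>> import networkit\n\t>>> G = networkit.Graph(5)\n\t>>> G.toString()\n\n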
`pip3 uninstall networkit` will remove networkit.\n\n\tpython3 setup.py clean [--optimize=V]\n\nwill remove the extensions and its build folder as well as call scons to remove the NetworKit library and its build folder specified by `--optimize=V`.\n\n\n### `jupyterhub`: An interactive environment for working with NetworKit\n\nTo check that everything works as expected, open a Python terminal and run the example from the beginning of this document or the following lines:\n\n    >>> import networkit\n    >>> G = networkit.Graph(5)\n    >>> G.addEdge(0,1)\n    >>> G.toString()\n\nAdditionally, we recommend that you familiarize yourself with NetworKit through experimenting with the interactive IPython Notebook `NetworKit_UserGuide.ipynb` located in the folder `Doc/Notebooks`. The user guide also introduces a large portion of NetworKit's functionality with usage examples. To display and work with these notebooks, you have to install jupyterhub and start a local notebook server from the terminal with:\n\n\tjupyterhub --no-ssl\n\nIf you run into any problems with jupyterhub, head over to the [jupyterhub documentation](https://jupyterhub.readthedocs.io/en/latest/) and make sure you have the listed packages installed. If the notebook server starts as it is supposed to, your default browser should open a web interface or you have to open it manually. Then you can add `NetworKit_UserGuide.ipynb` from the above-mentioned location or browse to the location through the web interface.\n\nTo show plots within the notebooks, place the following two lines at the beginning of your notebook:\n\n\t%matplotlib\n\timport matplotlib.pyplot as plt\n\n__Note:__ Instead of running jupyterhub, it may still be possible to run `ipython3 notebook`. However, the notebook functionality of the ipython package is deprecated and has been moved to jupyterhub, which we strongly recommend.\n\n\n### Usage example\n\nNow that you are done installing NetworKit, you might want to try the following example:\n\n\t>>> from networkit import *\n\t>>> g = generators.HyperbolicGenerator(1e5).generate()\n\t>>> overview(g)\n\tNetwork Properties for:\t\t G#5\n\tnodes, edges\t\t\t 100000, 302148\n\tdirected?\t\t\t False\n\tweighted?\t\t\t False\n\tisolated nodes\t\t\t 1859\n\tself-loops\t\t\t 0\n\tdensity\t\t\t\t 0.000060\n\tclustering coefficient\t\t 0.718261\n\tmin/max/avg degree\t\t 0, 1045, 6.042960\n\tdegree assortativity\t\t 0.000725\n\tnumber of connected components\t4237\n\tsize of largest component\t 77131 (77.13 %)\n\t>>> communities = community.detectCommunities(g, inspect=True)\n\tPLM(balanced,pc,turbo) detected communities in 0.3781468868255615 [s]\n\tsolution properties:\n\t------------------- -----------\n\t# communities 4468\n\tmin community size 1\n\tmax community size 1820\n\tavg. community size 22.3814\n\tmodularity 0.989285\n\t------------------- -----------\n\t>>>\n\n\n### Building the C++ Core only\n\nIn case you do not need NetworKit's Python functionality, this section describes how to build the C++ parts only. We recommend [SCons] for building the C++ part of NetworKit. Individual settings for your environment will be read from a configuration file. As an example, the file `build.conf.example` is provided. Copy this to `build.conf` and edit your environment settings. 
Then call `scons`.\n\nThe call to SCons has the following options:\n\n\tscons --optimize=<level> --target=<target>\n\nwhere `<level>` can be\n\n- `Dbg` \tdebug\n- `Opt`\t\toptimized\n- `Pro`\t\tprofiling\n\nand `<target>` can be\n\n- `Core`\t\t\t\tbuild NetworKit as a library, required by the Python shell\n- `Tests`\t\t\t\tbuild executable for the unit tests\n- `Lib`\t\t\t\t\tbuild NetworKit as a library and create symbolic links\n\nFor example, to build NetworKit as an optimized library, run\n\n\tscons --optimize=Opt --target=Lib\n\nTo speed up the compilation on a multicore machine, you can append `-jX` where X denotes the number of threads to compile with.\n\nLogging is enabled by default. If you want to disable logging functionality, add the following to your scons call:\n\n\t--logging=no\n\n### Use NetworKit as a library\n\nIt is also possible to use NetworKit as a library. Therefore, choose the target `Lib` when compiling NetworKit. The include directives in your C++ application look like the following:\n\n\t#include <NetworKit/graph/Graph.h>\n\nNetworKit in the directory `include` is a symlink to the directory `networkit/cpp`, so the directory structure from the repository is valid. To compile your application, you need to add the paths for the header files and the location of the library. Note that it is possible to link the different builds (debug, profiling, optimized) of the library. There is a simple source file to demonstrate this. Feel free to compile `LibDemo.cpp` as follows:\n\n\tg++ -o LibDemo -std=c++11 -I/path/to/repository/include -L/path/to/repository LibDemo.cpp -lNetworKit -fopenmp\n\n### Unit tests\n\nYou actually don't need to build and run our unit tests. However, if you experience any issues with NetworKit, you might want to check if NetworKit runs properly. The unit tests can only be run from a clone or copy of the repository and not from a pip installation. Please refer to the `Unit Tests and Testing` section in our `DevGuide`.\n\n## Known Issues\n- Mac OS X 10.10 \"Yosemite\": Some users have reported compilation problems on Yosemite with g++ 4.9. The compiler errors mention register problems.\n While the exact reason remains unclear, the actual issue seems to be that the compiler tries to perform a dual architecture build.\n Fix: Enforce a 64-bit build by prepending `ARCHFLAGS=\"-arch x86_64\"` to your setup/pip command, e.g. as in\n `sudo ARCHFLAGS=\"-arch x86_64\" python3 setup.py build_ext --inplace -j4` or `sudo ARCHFLAGS=\"-arch x86_64\" pip3 install networkit`.\n- NetworKit has not yet been successfully built on __Windows__. This is partially due to the fact that Windows ships without a C++ compiler which is necessary to build the Python extensions. Even with the Visual C++ Redistributable our attempts were not successful. Any help is appreciated. It may be possible to build NetworKit as a library on Windows in environments like MinGW or Cygwin.\n\n## Contributions\n\nWe would like to encourage contributions to the NetworKit source code. See the development guide (`DevGuide.mdown`) for instructions. For support please contact the [mailing list][list].\n\n\n## Credits\n\n### Core Development Team\n\nNetworKit is maintained by the [Research Group Parallel Computing](http://parco.iti.kit.edu) of the Institute of Theoretical Informatics at [Karlsruhe Institute of Technology (KIT)](http://www.kit.edu/english/index.php).\n\n### Maintainers\n- Christian L. 
Staudt\n- Henning Meyerhenke\n- Maximilian Vogel\n\n### Contributors\n- Lukas Barth\n- Miriam Beddig\n- Elisabetta Bergamini\n- Stefan Bertsch\n- Pratistha Bhattarai\n- Andreas Bilke\n- Simon Bischof\n- Guido Brückner\n- Mark Erb\n- Kolja Esders\n- Patrick Flick\n- Michael Hamann\n- Lukas Hartmann\n- Daniel Hoske\n- Gerd Lindner\n- Moritz v. Looz\n- Yassine Marrakchi\n- Mustafa Özdayi\n- Marcel Radermacher\n- Klara Reichard\n- Matteo Riondato\n- Marvin Ritter\n- Aleksejs Sazonovs\n- Arie Slobbe\n- Florian Weber\n- Michael Wegner\n- Jörg Weisbarth\n\n\n### External Code\n\nThe program source includes:\n\n- the *[The Lean Mean C++ Option Parser][optparse]* by Matthias S. Benkmann\n- the *[TTMath]* bignum library by Tomasz Sowa\n\n[mitlicense]: http://opensource.org/licenses/MIT\n[optparse]: http://optionparser.sourceforge.net/\n[ttmath]: http://www.ttmath.org/\n\n## License\nThe source code of this program is released under the [MIT License][mitlicense]. We ask you to cite us if you use this code in your project (c.f. the publications section below and especially the [technical report](https://arxiv.org/abs/1403.3005)). Feedback is also welcome.\n\n## Publications\nThe [NetworKit publications page][nwkpubs] lists the publications on NetworKit as a toolkit, on algorithms available\nin NetworKit, and simply using NetworKit. We ask you to cite the appropriate ones if you found NetworKit useful for your own research.\n\n[nwkpubs]: https://networkit.iti.kit.edu/publications/\n[list]: https://lists.ira.uni-karlsruhe.de/mailman/listinfo/networkit\n[networkit]: https://networkit.iti.kit.edu/\n[IPython]: http://ipython.readthedocs.org/en/stable/\n[NetworKit UserGuide]: http://nbviewer.ipython.org/urls/networkit.iti.kit.edu/data/uploads/docs/NetworKit_UserGuide.ipynb\n[here]: TODO:website_link\n[g++]: https://gcc.gnu.org\n[clang++]: http://clang.llvm.org\n[Pip]: https://pypi.python.org/pypi/pip\n[SCons]: http://scons.org\n" }, { "alpha_fraction": 0.6324607133865356, "alphanum_fraction": 0.6382198929786682, "avg_line_length": 20.954023361206055, "blob_id": "cfbdcd75521554133664fae121c00b58e428137c", "content_id": "8b7499f1fe5184914f3e96e0bd9d54635799a937", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1910, "license_type": "permissive", "max_line_length": 178, "num_lines": 87, "path": "/networkit/cpp/graph/SSSP.cpp", "repo_name": "networkitproject/networkit-mirror", "src_encoding": "UTF-8", "text": "/*\n * SSSP.cpp\n *\n * Created on: 15.04.2014\n * Author: cls\n */\n\n#include \"SSSP.h\"\n#include \"../auxiliary/Log.h\"\n\nnamespace NetworKit {\n\nSSSP::SSSP(const Graph& G, node s, bool storePaths, bool storeStack, node target) : Algorithm(), G(G), source(s), target(target), storePaths(storePaths), storeStack(storeStack) {\n}\n\nstd::vector<edgeweight> SSSP::getDistances(bool moveOut) {\n\treturn (moveOut)?std::move(distances):distances;\n}\n\n\nstd::vector<node> SSSP::getPath(node t, bool forward) const {\n\tif (! 
storePaths) {\n\t\tthrow std::runtime_error(\"paths have not been stored\");\n\t}\n\tstd::vector<node> path;\n\tif (previous[t].empty()) { // t is not reachable from source\n\t\tWARN(\"there is no path from \", source, \" to \", t);\n\t\treturn path;\n\t}\n\tnode v = t;\n\twhile (v != source) {\n\t\tpath.push_back(v);\n\t\tv = previous[v].front();\n\t}\n\tpath.push_back(source);\n\n\tif (forward) {\n\t\tstd::reverse(path.begin(), path.end());\n\t}\n\treturn path;\n}\n\n\nstd::set<std::vector<node>> SSSP::getPaths(node t, bool forward) const {\n\n\tstd::set<std::vector<node>> paths;\n\tif (previous[t].empty()) { // t is not reachable from source\n\t\tWARN(\"there is no path from \", source, \" to \", t);\n\t\treturn paths;\n\t}\n\n\tstd::function<void (std::vector<node> suffix, node v) > trace = [&](std::vector<node> suffix, node v) {\n\t\t// base case\n\t\tsuffix.push_back(v);\n\t\tif (v == source) {\n\t\t\tpaths.insert(suffix);\n\t\t\treturn;\n\t\t}\n\t\tfor (node u : previous[v]) {\n\t\t\ttrace(suffix, u);\n\t\t}\n\t};\n\n\tstd::vector<node> emptyPath;\n\ttrace(emptyPath, t);\n\n\tif (forward) {\n\t\tstd::set<std::vector<node>> paths1;\n\t\tfor (std::vector<node> path : paths) {\n\t\t\tstd::reverse(std::begin(path), std::end(path));\n\t\t\tpaths1.insert(path);\n\t\t}\n\t\treturn paths1;\n\t}\n\n\treturn paths;\n}\n\n\nstd::vector<node> SSSP::getStack(bool moveOut) {\n\tif (!storeStack) {\n\t\tthrow std::runtime_error(\"stack has not been stored\");\n\t}\n\treturn (moveOut)?std::move(stack):stack;\n}\n\n} /* namespace NetworKit */\n" }, { "alpha_fraction": 0.7256461381912231, "alphanum_fraction": 0.7311515808105469, "avg_line_length": 32.02525329589844, "blob_id": "6bbbc18894c6f79e67812a175a960e797df5225e", "content_id": "ea287efe898f1c53605b18541580246b8862f99f", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6539, "license_type": "permissive", "max_line_length": 204, "num_lines": 198, "path": "/networkit/community.py", "repo_name": "networkitproject/networkit-mirror", "src_encoding": "UTF-8", "text": "\"\"\" This module handles community detection, i.e. the discovery of densely connected groups in networks.\"\"\"\n\n__author__ = \"Christian Staudt\"\n\n\nfrom _NetworKit import Partition, Coverage, Modularity, CommunityDetector, PLP, LPDegreeOrdered, PLM, PartitionReader, PartitionWriter,\\\n\tNodeStructuralRandMeasure, GraphStructuralRandMeasure, JaccardMeasure, NMIDistance, AdjustedRandMeasure,\\\n\tStablePartitionNodes, IntrapartitionDensity, PartitionHubDominance, CoverHubDominance, PartitionFragmentation, IsolatedInterpartitionExpansion, IsolatedInterpartitionConductance,\\\n\tEdgeListPartitionReader, GraphClusteringTools, ClusteringGenerator, PartitionIntersection, HubDominance, CoreDecomposition, CutClustering, ParallelPartitionCoarsening\n\n# R.I.P.: The CNM (Clauset, Newman, Moore) community detection algorithm - it was always a bit slow, but it broke down in the end. Resurrect it from history (<= 3.4.1) if needed for experimental purposes.\n\n# local imports\n#from .properties import CoreDecomposition, overview\nfrom . import graph\nfrom . import stopwatch\nfrom . 
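import graphio\n\n# Typical workflow built on this module (a sketch; the output file name is\n# hypothetical, the generator call is taken from the package's usage examples):\n#\n#   from networkit import *\n#   G = generators.HyperbolicGenerator(1e4).generate()\n#   communities = community.detectCommunities(G)  # defaults to PLM, see below\n#   community.writeCommunities(communities, \"communities.partition\")\n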
\n# external imports\nimport os\nimport math\nimport random\ntry:\n\timport tabulate\nexcept ImportError:\n\tprint(\"\"\" WARNING: module 'tabulate' not found, please install it to use the full functionality of NetworKit \"\"\")\nimport tempfile\nimport subprocess\n\ndef detectCommunities(G, algo=None, inspect=True):\n\t\"\"\" Perform high-performance community detection on the graph.\n\t\t:param G the graph\n\t\t:param algo community detection algorithm instance\n\t\t:return communities (as type Partition)\n\t\t\"\"\"\n\tif algo is None:\n\t\talgo = PLM(G, refine=False)\n\tt = stopwatch.Timer()\n\talgo.run()\n\tzeta = algo.getPartition()\n\tt.stop()\n\tprint(\"{0} detected communities in {1} [s]\".format(algo.toString(), t.elapsed))\n\tif inspect:\n\t\tprint(\"solution properties:\")\n\t\tinspectCommunities(zeta, G)\n\treturn zeta\n\ndef inspectCommunities(zeta, G):\n\t\"\"\" Display information about communities\n\t\t:param zeta communities\n\t\t:param G graph\n\t\"\"\"\n\tcommunitySizes = zeta.subsetSizes()\n\tmod = Modularity().getQuality(zeta, G)\n\tcommProps = [\n\t\t[\"# communities\", zeta.numberOfSubsets()],\n\t\t[\"min community size\", min(communitySizes)],\n\t\t[\"max community size\", max(communitySizes)],\n\t\t[\"avg. community size\", sum(communitySizes) / len(communitySizes)],\n\t\t#[\"imbalance\", zeta.getImbalance()],\n\t\t[\"modularity\", mod],\n\t]\n\tprint(tabulate.tabulate(commProps))\n\n\ndef communityGraph(G, zeta):\n\t\"\"\" Create a community graph, i.e. a graph in which one node represents a community and an edge represents the edges between communities, from a given graph and a community detection solution\"\"\"\n\tcg = ParallelPartitionCoarsening(G, zeta)\n\tcg.run()\n\treturn cg.getCoarseGraph()\n\n\ndef evalCommunityDetection(algo, G):\n\t\"\"\" Evaluate a community detection algorithm \"\"\"\n\n\tt = stopwatch.Timer()\n\talgo.run()\n\tzeta = algo.getPartition()\n\tt.stop()\n\tresults = [\n\t\t[\"time [s]\", t.elapsed],\n\t\t[\"# communities\", zeta.numberOfSubsets()],\n\t\t[\"modularity\", Modularity().getQuality(zeta, G)]\n\t]\n\tprint(tabulate.tabulate(results))\n\n\ndef readCommunities(path, format=\"default\"):\n\t\"\"\" Read a partition into communities from a file\"\"\"\n\treaders = {\"default\": PartitionReader(),\n\t\t\"edgelist-t1\": EdgeListPartitionReader(1, '\\t'),\n\t\t\"edgelist-t0\": EdgeListPartitionReader(0, '\\t'),\n\t\t\"edgelist-s1\": EdgeListPartitionReader(1, ' '),\n\t\t\"edgelist-s0\": EdgeListPartitionReader(0, ' '),\n\t\t}\n\t# get reader\n\ttry:\n\t\treader = readers[format]#(**kwargs)\n\texcept KeyError:\n\t\traise Exception(\"unrecognized format: {0}\".format(format))\n\n\t# get proper file path\n\tif (\"~\" in path):\n\t\tpath = os.path.expanduser(path)\n\t\tprint(\"path expanded to: {0}\".format(path))\n\t# check if file path leads to a valid file\n\tif not os.path.isfile(path):\n\t\traise IOError(\"{0} is not a file\".format(path))\n\telse:\n\t\twith open(path, \"r\") as file: # catch a wrong path before it crashes the interpreter\n\t\t\tprint(\"read communities from: {0}\".format(path))\n\t\t\tcommunities = reader.read(path)\n\t\t\treturn communities\n\n\treturn None\n\n\ndef writeCommunities(communities, path):\n\t\"\"\" Write a partition into communities to a file\"\"\"\n\tPartitionWriter().write(communities, path)\n\tprint(\"wrote communities to: {0}\".format(path))\n\n\ndef compareCommunities(G, zeta1, zeta2):\n\t\"\"\" Compare the partitions with respect to several (dis)similarity 
measures\"\"\"\n\traise NotImplementedError(\"TODO:\")\n\n\ndef kCoreCommunityDetection(G, k, algo=None, inspect=True):\n\t\"\"\" Perform community detection on the k-core of the graph, which possibly\n\t\treduces computation time and enhances the result.\n\t\t:param G the graph (may not contain self-loops)\n\t\t:param\t\tk \tk as in k-core\n\t\t:param algorithm community detection algorithm instance\n\t\t:return communities (as type Partition)\n\t\t\"\"\"\n\tcoreDec = CoreDecomposition(G)\n\tcoreDec.run()\n\n\tcores = coreDec.cores()\n\ttry:\n\t\tkCore = cores[k]\n\texcept IndexError:\n\t\traise Error(\"There is no core for the specified k\")\n\n\tC = graph.Subgraph().fromNodes(G, kCore)\t# FIXME: node indices are not preserved\n\n\t#properties.overview(C)\n\n\treturn detectCommunities(C, algo, inspect)\n\n\ndef mesoscopicResponseFunction(G, samples=100):\n\t\"\"\"\n\t\"\"\"\n\traise NotImplementedError(\"work in progress\")\n\tm = G.numberOfEdges()\n\tgammaRangeLow = [math.e**x for x in range(-10, 0)]\n\tgammaRangeHigh = [math.e**x for x in range(0, math.ceil(math.log(2*m)))]\n\tgammaRange = gammaRangeLow + gammaRangeHigh\n\tprint(gammaRange)\n\tnCom = []\n\n\tfor gamma in gammaRange:\n\t\tcommunityDetector = PLM(G, gamma=gamma)\n\t\tcommunityDetector.run()\n\t\tcommunities = communityDetector.getPartition()\n\t\tnCom.append(communities.numberOfSubsets())\n\n\treturn (gammaRange, nCom)\n\n\nclass InfomapAdapter:\n\n\tinfomapPath = None\n\n\tdef __init__(self, G):\n\t\tself.G = G\n\n\t@classmethod\n\tdef setPath(cls, infomapPath):\n\t\tcls.infomapPath = infomapPath\n\n\tdef run(self):\n\t\tif not self.infomapPath:\n\t\t\traise Exception(\"set path to infomap binary with 'setPath' class method\")\n\t\twith tempfile.TemporaryDirectory() as tempdir:\n\t\t\tprint(\"temporary file directory: \", tempdir)\n\t\t\tgraph_filename = os.path.join(tempdir, \"network.txt\")\n\t\t\tgraphio.writeGraph(self.G, graph_filename, fileformat=graphio.Format.EdgeListSpaceZero)\n\t\t\tsubprocess.call([self.infomapPath, \"-s\", str(random.randint(-2**31, 2**31)), \"-2\", \"-z\", \"--clu\", graph_filename, tempdir])\n\t\t\tself.result = readCommunities(os.path.join(tempdir, \"network.clu\"), format=\"edgelist-s0\")\n\t\t\twhile self.result.numberOfElements() < self.G.upperNodeIdBound():\n\t\t\t\tself.result.toSingleton(result.extend())\n\t\treturn self\n\n\tdef getPartition(self):\n\t\treturn self.result\n" }, { "alpha_fraction": 0.6341463327407837, "alphanum_fraction": 0.6646341681480408, "avg_line_length": 15.399999618530273, "blob_id": "de303a43c444f9c2adf732852694a6858a156a18", "content_id": "0bd3a09ee7fad3c535bc1010717754dec2f7136d", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 492, "license_type": "permissive", "max_line_length": 45, "num_lines": 30, "path": "/networkit/cpp/graph/test/Graph2Benchmark.h", "repo_name": "networkitproject/networkit-mirror", "src_encoding": "UTF-8", "text": "/*\n * Graph2Benchmark.h\n *\n * Created on: 05.02.2013\n * Author: Christian Staudt ([email protected])\n */\n\n#ifndef NOGTEST\n\n#ifndef GRAPH2BENCHMARK_H_\n#define GRAPH2BENCHMARK_H_\n\n#include <gtest/gtest.h>\n\n#include \"../Graph.h\"\n#include \"../../auxiliary/Timer.h\"\n#include \"../../auxiliary/Log.h\"\n\nnamespace NetworKit {\n\nclass Graph2Benchmark: public testing::Test {\npublic:\n\tGraph2Benchmark();\n\tvirtual ~Graph2Benchmark();\n};\n\n} /* namespace NetworKit */\n#endif /* GRAPH2BENCHMARK_H_ */\n\n#endif /*NOGTEST */\n" }, { "alpha_fraction": 
0.7880798578262329, "alphanum_fraction": 0.7900869846343994, "avg_line_length": 86.92156982421875, "blob_id": "2e8da5f74e5de3e59106c782556c31bc096632f0", "content_id": "f99c0d824872f454d8b8c9f4c74b37b4a54b264c", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 17941, "license_type": "permissive", "max_line_length": 921, "num_lines": 204, "path": "/Doc/doc/features.rst", "repo_name": "networkitproject/networkit-mirror", "src_encoding": "UTF-8", "text": ".. |br| raw:: html\n\n <br />\n\n.. |separator| raw:: html\n\n\t<div style=\"padding-top: 25px; border-bottom: 1px solid #d4d7d9;\"></div>\n\n=========================\nFeatures and Design Goals\n=========================\n\n**NetworKit** is implemented as a hybrid of performance-aware code written in C++ (often parallelized using OpenMP) with an interface and additional functionality written in Python.\nMore details and an illustration are provided in the `Architecture`_ Section below.\nNetworKit is distributed as a Python package, ready to use interactively from a Python shell, which is the main usage scenario we envision for domain experts.\nIf you want to know more about our design goals, then take a look at our `Design Goals and Principles`_ Section below.\n\nThe best way to get an overall picture of a network is to use the *Profiling* module. Take a look at the `Network Profiling`_ Section below. If you are only interested in a\nsmall subset of network analysis measures, it might be more convenient to compute them separately instead of using the *Profiling* module. Check out the `Network Analytics`_\nSection to get an overview of the most important measures NetworKit supports.\n\nNetworKit also comes with several `community detection algorithms`_ that reveal insights into the community structure of a network. For the generation of synthetic networks with\nspecific properties, the toolkit provides several `graph generators`_.\n\nA good (albeit in some parts slightly outdated) introduction to NetworKit and its features is given in the following video.\n\n.. raw:: html\n\n <div style=\"margin: 0px auto; text-align: center;\">\n <iframe width=\"700\" height=\"394\" src=\"https://www.youtube.com/embed/RtZyHCGyeIk\" frameborder=\"0\" allowfullscreen></iframe>\n <p>Please note that the video is more than two years old and is therefore slightly outdated in some parts.</p>\n </div>\n\n|separator|\n\nNetwork Profiling\n-----------------\n\nTo see the most important features of a network at a glance, NetworKit provides the *Profiling* module. The module assembles many algorithms into one program, automates analysis tasks and produces a graphical report to be displayed in the Jupyter Notebook or exported to an HTML or LaTeX report document. For example, the following is an excerpt of the Profiling report for the graph *MIT8.edgelist* (available in the NetworKit repository):\n\n.. image:: resources/profiling_overview.png\n\t:align: center\n\nFor detailed instructions on how to use the Profiling module take a look at our `Profiling Notebook <http://nbviewer.jupyter.org/urls/networkit.iti.kit.edu/uploads/docs/Profiling.ipynb>`_.\n\n|separator|\n\nNetwork Analytics\n-----------------\n\nNetworKit provides a rich set of network analysis methods. 
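In an interactive session, a common entry point is the ``overview`` function from the usage example in the accompanying Readme; a minimal sketch (the generator call is copied from that example, and any NetworKit graph object can take its place):\n\n.. code-block:: python\n\n   from networkit import *\n   G = generators.HyperbolicGenerator(1e4).generate()   # any graph works here\n   overview(G)   # node/edge counts, density, clustering, degree statistics, components\n\n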
For details on the supported measures take a look at the `Technical Report <http://arxiv.org/pdf/1403.3005v1.pdf>`_.\n\nDegree Distribution\n^^^^^^^^^^^^^^^^^^^\nEmpirically observed complex networks tend to show a heavy-tailed degree distribution which follows a power-law with a characteristic exponent. NetworKit provides functions to analyze the\ndegree distribution of a network. For details visit the\n`Degree Distribution <http://nbviewer.jupyter.org/urls/networkit.iti.kit.edu/uploads/docs/NetworKit_UserGuide.ipynb#Degree-Distribution>`_ Section of the User Guide. The algorithm runs in :math:`O(n)`.\n\n(Degree) Assortativity\n^^^^^^^^^^^^^^^^^^^^^^\n\nDegree assortativity measures how well nodes with similar node degrees are connected to each other, which can point to important aspects such as a hierarchical network composition.\nIn NetworKit, we implemented Newman's formulation in linear time (:math:`O(m)`) and constant memory requirements.\n\nDiameter\n^^^^^^^^\n\nThe diameter of a graph is the maximum length of a shortest path between any two nodes. Many real-world complex networks have a very small and often constant diameter.\nNetworKit provides a function to calculate the exact diameter as well as several approximation algorithms for large networks. The exact algorithm runs in :math:`O(n*(n+m))` or\n:math:`O(n*(n*log(n) + m))` if the network is weighted, where :math:`n` and :math:`m` are defined as number of nodes and edges respectively.\n\nClustering Coefficients\n^^^^^^^^^^^^^^^^^^^^^^^\n\nClustering coefficients are key figures for the amount of transitivity in networks. NetworKit provides functions for both the global clustering coefficient as well as the local clustering\ncoefficient. NetworKit implements the wedge sampling approximation algorithm. It runs in essentially linear or even constant time, depending on the respective measure. For details on the\nusage visit the `Clustering Coefficient <http://nbviewer.jupyter.org/urls/networkit.iti.kit.edu/uploads/docs/NetworKit_UserGuide.ipynb#Transitivity-/-Clustering-Coefficients>`_\nSection of the User Guide.\n\nComponents and Cores\n^^^^^^^^^^^^^^^^^^^^\n\nWe compute connected components in linear time using a parallel label propagation scheme in which each node adopts the maximum label in its neighborhood. Take a look at\nthe `Connected Components <http://nbviewer.jupyter.org/urls/networkit.iti.kit.edu/uploads/docs/NetworKit_UserGuide.ipynb#Connected-Components>`_ Section in the User Guide.\n\nThe core decomposition algorithm implemented in NetworKit uses a bucket data structure for managing remaining node degrees and has a running time which is linear\nin the number of edges. Visit the `Core Decomposition <http://nbviewer.jupyter.org/urls/networkit.iti.kit.edu/uploads/docs/NetworKit_UserGuide.ipynb#Core-Decomposition>`_\nSection of the User Guide for usage details.\n\nCentrality\n^^^^^^^^^^\n\nCentrality refers to the relative importance of a node or edge within a network. We distribute efficient implementations for betweenness, closeness, degree, Katz, eigenvector centrality and PageRank.\n\n* Betweenness: |br| Betweenness centrality expresses the concept that a node is important if it lies on many shortest paths between nodes in the network. A naive algorithm for calculating betweenness centrality for all nodes would require cubic time. 
We implement Brandes's algorithm, by which betweenness centrality can be computed more efficiently (:math:`O(n*m)` time, where :math:`n` is the number of nodes and :math:`m` is the number of edges of the graph). Optionally, our implementation computes the scores in parallel, at the expense of a larger memory footprint. Since this is still practically infeasible for the large data sets we target, NetworKit also includes parallelized implementations of two approximation algorithms. One of them has probabilistic guarantees such that the error is at most an additive constant, whereas the other has no theoretical guarantee but performs very well in practice. |br| |br|\n\n* Closeness: |br| Closeness centrality measures the importance of a node by the average distance to every other node in the graph. A node is therefore important when the distances to the other nodes are rather small. Computing the closeness values for all nodes requires solving a single-source shortest path problem for all nodes (:math:`O(n*m)` time, where :math:`n` is the number of nodes and :math:`m` is the number of edges of the graph) which makes it infeasible to compute for large networks. We therefore also provide an approximation algorithm with a probabilistic error guarantee. Notice that, since many real-world complex networks have a small diameter, the range of closeness values for the nodes of such a network is rather small. |br| |br|\n\n* Degree: |br| Degree centrality simply ranks the nodes by their degree (i.e. nodes with high degree are more important than nodes with low degree). The degree centrality can be computed in linear time for all nodes and the algorithm can be parallelized easily. |br| |br|\n\n* Katz: |br| Katz centrality is based on the idea that a node is important when the number of its neighbors is high and the other nodes are in close distance to its neighbors. Far away nodes play a less important role, which is accommodated for by the use of an attenuation factor. |br| |br|\n\n* Eigenvector Centrality and PageRank: |br| Eigenvector centrality and its variant PageRank assign relative importance to nodes according to their connections, incorporating the idea that edges to high-scoring nodes contribute more. PageRank is a version of eigenvector centrality which introduces a damping factor, modeling a random web surfer which at some point stops following links and jumps to a random page. In PageRank theory, centrality is understood as the probability that such a web surfer arrives at a certain page. Both variants are implemented in NetworKit based on parallel power iteration.\n\n\n|separator|\n\n.. _community detection algorithms:\n\nCommunity Detection\n-------------------\n\nCommunity detection is the task of identifying groups of nodes in the network which are significantly more densely connected among each other than to the rest of the nodes.\nFaced with an NP-hard optimization problem, we engineered parallel heuristics which deliver a good tradeoff between quality and running time.\n\n* PLP: |br| Community detection by label propagation extracts communities from a labelling of the node set. Each iteration takes linear time, and the algorithm has been\n empirically shown to reach a stable solution in only a few iterations. The purely local update rule makes label propagation well suited for a parallel implementation. |br| |br|\n\n* PLM: |br| The Louvain method (PLM) for community detection can be classified as a locally greedy, bottom-up multilevel algorithm. 
We provide a shared-memory parallelization\n of PLM in which node moves are evaluated and performed in parallel instead of sequentially. We also extend the method by an optional refinement phase, yielding the PLMR algorithm. |br| |br|\n\n\n|separator|\n\n\n.. _graph generators:\n\nGraph Generators\n----------------\n\nGenerative models aim to explain how networks form and evolve specific structural features. Such models and their implementations as generators have at least two important uses: On the one hand, software engineers want generators for synthetic datasets which can be arbitrarily scaled and produce graphs which resemble the real application data. On the other hand, network scientists employ models to increase their understanding of network phenomena. So far, NetworKit provides efficient generators for the following models (a short usage sketch follows the list):\n\n* Erdös-Renyi Model: |br| In this simple model edges are created among the nodes with a uniform probability for all pairs of vertices. Not intended to generate realistic graphs, it was viewed as a source of mathematical examples. |br| |br|\n\n* Clustered Random Graphs: |br| A simple variation of the Erdös-Renyi model is useful for generating graphs which have distinctive dense areas with sparse connections between them (i.e. communities). Nodes are equally distributed over k subsets, while nodes from the same subset are connected with a higher probability than nodes from different subsets. |br| |br|\n\n* Barabasi-Albert Model: |br| This model implements a preferential attachment process (\"rich become richer\") which results in a power-law degree distribution. The model was introduced in order to produce scale-free networks. |br| |br|\n\n* R-MAT Generator: |br| The Recursive Matrix (R-MAT) model was proposed to recreate properties including a power-law degree distribution, the small-world property and self-similarity. The R-MAT generator operates on the initially empty adjacency matrix which is recursively subdivided into four quadrants. Edges are \"dropped\" into the matrix and land in one of the quadrants according to given probabilities. NetworKit includes an efficient sequential implementation of R-MAT. |br| |br|\n\n* Chung-Lu Model: |br| The Chung-Lu model is a random graph model which aims to replicate a given degree distribution. The model can be conceived as a weighted version of the Erdös-Renyi model. |br| |br|\n\n* Havel-Hakimi Generator: |br| For a given realizable degree sequence, the algorithm of Havel and Hakimi generates a graph with exactly this degree sequence. While this is similar to the Chung-Lu model, the generative process promotes the formation of closed triangles, leading to a higher (and possibly more realistic) clustering coefficient. |br| |br|\n\n* Hyperbolic Random Graphs: |br| Using the exponential expansion of space in hyperbolic geometry, Hyperbolic Random Graphs exhibit high clustering, a power-law degree distribution with adjustable exponent and a natural hierarchy. Points are distributed within a disk in the hyperbolic plane; a pair of points is connected if their hyperbolic distance is below a threshold. |br| |br|\n\n* PubWeb Generator: |br| This network model is motivated by the P2P computing library *PubWeb*. For the generative process, nodes are embedded into the 2D Euclidean unit torus (square with wrap-around boundaries). To create edges, a variation of the disc graph model is employed with a uniform communication radius r for all nodes. A node is connected to up to k nearest neighbors within its communication radius. 
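|br| |br|\n\nThe usage sketch referenced above contrasts two of these models from Python (the hyperbolic call follows the usage examples in this distribution; the Erdös-Renyi generator name and all parameters are illustrative assumptions):\n\n.. code-block:: python\n\n   from networkit import generators\n   G1 = generators.ErdosRenyiGenerator(1000, 0.01).generate()   # uniform edge probability\n   G2 = generators.HyperbolicGenerator(1000).generate()         # power-law degrees, high clustering\n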
\n\n|separator|\n\n\n\nIntegration with other Tools\n----------------------------\n\nAs a Python module, NetworKit enables seamless integration with Python libraries for scientific computing and data analysis, e.g. :code:`pandas` for dataframe processing and analytics,\n:code:`matplotlib` for plotting, :code:`numpy` and :code:`scipy` for numerical and scientific computing and :code:`networkx` for additional network analysis tasks.\n\nFurthermore, NetworKit provides functions to convert graph objects to NetworkX and thereby connects the two modules. One can also use some of the numerous NetworkX functions by\nimporting NetworkX. This opens up a wide range of possibilities which are not yet, or may never be, implemented within NetworKit. Note, however, that NetworkX is written mostly in pure\nPython, its data structures are more memory-intensive and its algorithms do not target very large graphs. You are likely to reach limits of your machine for graphs with millions of edges,\nwhile NetworKit aims for good performance for three more orders of magnitude.\n\n.. TODO: Drawing graphs with Gephi\n\n|separator|\n\n\nArchitecture\n------------\n\nWith the hybrid approach, we are able to combine the performance of C++ with the easy and interactive environment of Python and Jupyter Notebook. We provide a Python package\nthat can be installed easily via pip (see :ref:`Pip install`). This makes it very easy to start working with NetworKit interactively. However, the code can also be used as a\nlibrary for application programming, either at the Python or the C++ level. Throughout the project we use object-oriented and functional concepts. On the C++ level, we make\nextensive use of closures, using the lambda syntax introduced with C++11. Shared-memory parallelism is realized with OpenMP, providing loop parallelization and synchronization\nconstructs while abstracting away the details of thread creation and handling.\n\n.. image:: resources/NetworKit-Architecture.png\n\t:align: center\n\t:width: 700px\n\nConnecting these native implementations to the Python world is enabled by the `Cython <http://cython.org>`_ toolchain. Among other things, Cython can compile pure Python code to\nC or C++, circumventing the Python interpreter, and also allows for static type annotations – yielding considerable speedup in combination. Currently we use Cython merely to\nintegrate native code by compiling it into a native Python extension module. As a benefit of Python integration, NetworKit's functionality can be accessed interactively. Thus,\nanalysis kernels can be freely combined. Furthermore, NetworKit can be seamlessly integrated into the rich Python ecosystem for data analysis. We consider this kind of\nintegration crucial for real-world data analysis workflows.\n\n\n|separator|\n\n\nDesign Goals and Principles\n---------------------------\n\nThere is a variety of software packages which provide graph algorithms in general and network analysis capabilities in particular. However, NetworKit aims to balance a specific combination of strengths:\n\n* Performance: Algorithms and data structures are selected and implemented with high performance and parallelism in mind. Some implementations are among the fastest in published research. For example, community detection in a 3.3 billion edge web graph can be performed on a 16-core server in less than three minutes.\n\n* Usability: Networks are as diverse as the series of questions we might ask of them -- e.g. 
what is the largest connected component, what are the most central nodes in it and how do they connect to each other? A practical tool for network analysis should therefore provide modular functions which do not restrict the user to predefined workflows. An interactive shell, which the Python language provides, is one prerequisite for that. While NetworKit works with the standard Python 3 interpreter, calling the module from the IPython shell and Jupyter Notebook HTML interface allows us to integrate it into a fully fledged computing environment for scientific workflows, from data preparation to creating figures. It is also easy to set up and control a remote compute server.\n\n* Integration: As a Python module, NetworKit can be seamlessly integrated with Python libraries for scientific computing and data analysis, e.g. pandas for data frame processing and analytics, matplotlib for plotting or numpy and scipy for numerical and scientific computing. For certain tasks, we provide interfaces to external tools, e.g. Gephi for graph visualization.\n\n* Design Principles: Our main focus is on scalable algorithms to support network analysis on massive networks. Several algorithm and implementation patterns are used to achieve this goal: parallelism, fast heuristics and approximation algorithms for problems that are otherwise not solvable in nearly-linear time, efficient data structures, and modular software design.\n" }, { "alpha_fraction": 0.7004559636116028, "alphanum_fraction": 0.7071202993392944, "avg_line_length": 32.02525329589844, "blob_id": "6bbbc18894c6f79e67812a175a960e797df5225e", "content_id": "ea287efe898f1c53605b18541580246b8862f99f", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 2851, "license_type": "permissive", "max_line_length": 271, "num_lines": 93, "path": "/networkit/cpp/centrality/TopCloseness.h", "repo_name": "networkitproject/networkit-mirror", "src_encoding": "UTF-8", "text": "/*\n * TopCloseness.h\n *\n * Created on: 03.10.2014\n * Author: ebergamini, michele borassi\n */\n\n#ifndef TOPCLOSENESS_H_\n#define TOPCLOSENESS_H_\n#include \"../graph/Graph.h\"\n#include \"../base/Algorithm.h\"\n#include \"../auxiliary/PrioQueue.h\"\n\nnamespace NetworKit {\n\n/**\n * @ingroup centrality\n */\nclass TopCloseness : public Algorithm {\npublic:\n\n /**\n\t * Finds the top k nodes with highest closeness centrality faster than computing it for all nodes, based on \"Computing Top-k Closeness Centrality Faster in Unweighted Graphs\", Bergamini et al., ALENEX16.\n\t * The algorithm is based on two independent heuristics, described in the referenced paper. We recommend using first_heu = true and sec_heu = false for complex networks and first_heu = true and sec_heu = true for street networks or networks with large diameters.\n\t *\n\t * @param G An unweighted graph.\n\t * @param k Number of nodes with highest closeness that have to be found. For example, if k = 10, the top 10 nodes with highest closeness will be computed.\n\t * @param first_heu If true, the neighborhood-based lower bound is computed and nodes are sorted according to it. If false, nodes are simply sorted by degree.\n\t * @param sec_heu If true, the BFSbound is re-computed at each iteration. 
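If false, BFScut is used.\n\t *\n\t * Typical usage (a sketch in a comment; the calls mirror the interface declared in this header):\n\t *\n\t *   TopCloseness topc(G, 10);\n\t *   topc.run();\n\t *   std::vector<node> top = topc.topkNodesList();\n\t */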
\n TopCloseness(const Graph& G, count k = 1, bool first_heu = true, bool sec_heu = true);\n\n /**\n\t* Computes top-k closeness\n\t*\n\t*/\n\tvoid run();\n\n /**\n\t* Returns a list with the k nodes with highest closeness\n\t*\n\t*/\n\tstd::vector<node> topkNodesList();\n\n /**\n\t* Returns a list with the scores of the k nodes with highest closeness\n\t*\n\t*/\n\tstd::vector<edgeweight> topkScoresList();\n\nprotected:\n\tGraph G;\n count n;\n\tcount k;\n\tbool first_heu, sec_heu;\n\tstd::vector<node> topk;\n\tcount visEdges = 0;\n count n_op = 0;\n\tstd::vector<std::vector<node>> levels;\n\tstd::vector<count> nodesPerLev;\n count nLevs = 0;\n std::vector<edgeweight> topkScores;\n\tstd::vector<count> maxlevel;\n\tstd::vector<count> maxlevelSize;\n std::vector<std::vector<count>> subtree;\n std::vector<double> farness;\n std::vector<count> reachL;\n std::vector<count> reachU;\n std::vector<count> component;\n\n\tvoid init();\n double BFScut(node v, double x, bool *visited, count *distances, node *pred, count *visEdges);\n void computelBound1(std::vector<double> &S);\n void BFSbound(node x, std::vector<double> &S, count *visEdges);\n void computeReachable();\n void computeReachableNodesUndir();\n void computeReachableNodesDir();\n};\n\n\ninline std::vector<node> TopCloseness::topkNodesList() {\n\tif (!hasRun) throw std::runtime_error(\"Call run method first\");\n\t return topk;\n}\n\ninline std::vector<edgeweight> TopCloseness::topkScoresList() {\n\tif (!hasRun) throw std::runtime_error(\"Call run method first\");\n\t return topkScores;\n}\n\n} /* namespace NetworKit */\n#endif /* TOPCLOSENESS_H_ */\n" }, { "alpha_fraction": 0.6541095972061157, "alphanum_fraction": 0.6746575236320496, "avg_line_length": 14.600000381469727, "blob_id": "6593159bd924ceaef7a476c6554bec638f63dd30", "content_id": "7a7678af194744210246065276d96f39acb0c7f7", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 292, "license_type": "permissive", "max_line_length": 45, "num_lines": 20, "path": "/networkit/cpp/graph/test/GraphToolsGTest.h", "repo_name": "networkitproject/networkit-mirror", "src_encoding": "UTF-8", "text": "/*\n * GraphToolsGTest.h\n *\n * Created on: 22.11.14\n * Author: Maximilian Vogel\n */\n\n#ifndef GRAPHTOOLSGTEST_H\n#define GRAPHTOOLSGTEST_H\n\n#include <gtest/gtest.h>\n\nnamespace NetworKit {\n\nclass GraphToolsGTest: public testing::Test {\n};\n\n} /* namespace NetworKit */\n\n#endif /* GRAPHTOOLSGTEST_H */\n" }, { "alpha_fraction": 0.6571767330169678, "alphanum_fraction": 0.6654804348945618, "avg_line_length": 21.252099990844727, "blob_id": "0ccf14b666b39ad8c25f1aa64e0203f9eec27dd", "content_id": "736d89b400c7f06b7ee587206b0dfb2761ad367f", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 2529, "license_type": "permissive", "max_line_length": 85, "num_lines": 119, "path": "/networkit/cpp/auxiliary/PrioQueueForInts.cpp", "repo_name": "networkitproject/networkit-mirror", "src_encoding": "UTF-8", "text": "/*\n * PrioQueueForInts.cpp\n *\n * Created on: 26.06.2015\n * Author: Henning\n */\n\n#include \"PrioQueueForInts.h\"\n\nnamespace Aux {\n\n\nPrioQueueForInts::PrioQueueForInts(std::vector<index>& prios, index maxPrio):\n\t\tbuckets(maxPrio+1), nodePtr(prios.size()), myBucket(prios.size(), NetworKit::none),\n\t\tminNotEmpty(maxPrio+1), maxNotEmpty(-1), maxPrio(maxPrio), numElems(0)\n{\n\tfor (index i = 0; i < prios.size(); ++i) {\n\t\tif (prios[i] 
!= NetworKit::none) {\n\t\t\tinsert(i, prios[i]);\n\t\t}\n\t}\n}\n\nvoid PrioQueueForInts::changePrio(index elem, index prio) {\n\tremove(elem);\n\tinsert(elem, prio);\n}\n\n\nvoid PrioQueueForInts::insert(index elem, index prio) {\n\tassert(0 <= prio && prio <= maxPrio);\n\tassert(0 <= elem && elem < nodePtr.size());\n\n\tbuckets[prio].push_front(elem);\n\tnodePtr[elem] = buckets[prio].begin();\n\tmyBucket[elem] = prio;\n\t++numElems;\n\n\t// bookkeeping\n\tif (prio < minNotEmpty || minNotEmpty > maxPrio) {\n\t\tminNotEmpty = prio;\n\t}\n\tif (maxNotEmpty < 0 || prio > (unsigned int) maxNotEmpty) {\n\t\tmaxNotEmpty = prio;\n\t}\n}\n\nindex PrioQueueForInts::extractMin() {\n\tif (minNotEmpty > maxPrio) {\n\t\treturn NetworKit::none;\n\t}\n\telse {\n\t\tassert(! buckets[minNotEmpty].empty());\n\t\tindex result = buckets[minNotEmpty].front();\n\t\tremove(result);\n\t\treturn result;\n\t}\n}\n\nindex PrioQueueForInts::extractMax() {\n\tif (maxNotEmpty < 0) {\n\t\treturn NetworKit::none;\n\t}\n\telse {\n\t\tassert(! buckets[maxNotEmpty].empty());\n\t\tindex result = buckets[maxNotEmpty].front();\n\t\tremove(result);\n\t\treturn result;\n\t}\n}\n\nvoid PrioQueueForInts::remove(index elem) {\n\tassert(0 <= elem && elem < nodePtr.size());\n\n\tif (myBucket[elem] != NetworKit::none) {\n\t\t// remove from appropriate bucket\n\t\tindex prio = myBucket[elem];\n\t\tbuckets[prio].erase(nodePtr[elem]);\n\t\tmyBucket[elem] = NetworKit::none;\n\t\t--numElems;\n\n\t\t// adjust max pointer if necessary (bounds check first to avoid\n\t\t// indexing with a stale pointer once all buckets are empty)\n\t\twhile (maxNotEmpty >= 0 && buckets[maxNotEmpty].empty()) {\n\t\t\t--maxNotEmpty;\n\t\t}\n\n\t\t// adjust min pointer if necessary\n\t\twhile (minNotEmpty <= maxPrio && buckets[minNotEmpty].empty()) {\n\t\t\t++minNotEmpty;\n\t\t}\n\t}\n}\n\nindex PrioQueueForInts::extractAt(index prio) {\n\tassert(0 <= prio && prio <= maxPrio);\n\tif (buckets[prio].empty()) {\n\t\treturn NetworKit::none;\n\t}\n\telse {\n\t\tindex result = buckets[prio].front();\n\t\tmyBucket[result] = NetworKit::none;\n\t\tbuckets[prio].pop_front();\n\t\t--numElems; // fix: the element count was not decremented here\n\n\t\t// bookkeeping as in remove(): the extraction may have emptied the\n\t\t// bucket that the min/max pointers refer to\n\t\twhile (maxNotEmpty >= 0 && buckets[maxNotEmpty].empty()) {\n\t\t\t--maxNotEmpty;\n\t\t}\n\t\twhile (minNotEmpty <= maxPrio && buckets[minNotEmpty].empty()) {\n\t\t\t++minNotEmpty;\n\t\t}\n\n\t\treturn result;\n\t}\n}\n\nindex PrioQueueForInts::priority(index elem) {\n\treturn myBucket[elem];\n}\n\nbool PrioQueueForInts::empty() const {\n\treturn (numElems == 0);\n}\n\ncount PrioQueueForInts::size() const {\n\treturn numElems;\n}\n\n} /* namespace Aux */\n" }, { "alpha_fraction": 0.6603699922561646, "alphanum_fraction": 0.6823758482933044, "avg_line_length": 23.569377899169922, "blob_id": "9e125d4347129cd9451fa0c32fd7f948c383a285", "content_id": "b9c70ce56c2b50115adc5cd101fb90c224d4d40b", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 5135, "license_type": "permissive", "max_line_length": 74, "num_lines": 209, "path": "/networkit/cpp/viz/test/VizGTest.cpp", "repo_name": "networkitproject/networkit-mirror", "src_encoding": "UTF-8", "text": "/*\n * VizGTest.cpp\n *\n * Created on: Apr 10, 2013\n * Author: Henning\n */\n\n#ifndef NOGTEST\n\n#include \"VizGTest.h\"\n#include <vector>\n\n#include \"../PostscriptWriter.h\"\n#include \"../FruchtermanReingold.h\"\n#include \"../MaxentStress.h\"\n#include \"../MultilevelLayouter.h\"\n#include \"../../graph/Graph.h\"\n#include \"../../community/ClusteringGenerator.h\"\n#include \"../../generators/ClusteredRandomGraphGenerator.h\"\n#include \"../../io/PartitionWriter.h\"\n#include \"../../io/METISGraphReader.h\"\n#include \"../../io/METISGraphWriter.h\"\n#include \"../../io/DibapGraphReader.h\"\n#include \"../../generators/PubWebGenerator.h\"\n#include 
\"../../auxiliary/Random.h\"\n\n\nnamespace NetworKit {\n\nVizGTest::VizGTest() {\n\n}\n\nVizGTest::~VizGTest() {\n\n}\n\n\nTEST_F(VizGTest, testPostscriptWriterOnRandomGraph) {\n\tAux::Random::setSeed(1, false);\n\t// create graph\n\tcount n = 60;\n\tcount numClusters = 3;\n\tdouble pin = 0.35;\n\tdouble pout = 0.05;\n\n\tClusteredRandomGraphGenerator graphGen(n, numClusters, pin, pout);\n\tGraph G = graphGen.generate();\n\tG.initCoordinates();\n\n\t// create coordinates\n\tG.forNodes([&](node u) {\n\t\tPoint<float> p(Aux::Random::probability(), Aux::Random::probability());\n\t\tG.setCoordinate(u, p);\n\t});\n\n\t// write graph to file\n\tPostscriptWriter psWriter;\n\tpsWriter.write(G, \"output/testGraph.eps\");\n}\n\n#if !defined _WIN32 && !defined _WIN64 && !defined WIN32 && !defined WIN64\nTEST_F(VizGTest, testPostscriptWriterOnRealGraph) {\n\t// read graph and coordinates from binary file\n\tDibapGraphReader reader;\n\tGraph G = reader.read(\"input/airfoil1.gi\");\n\n\t// write graph to file\n\tPostscriptWriter psWriter;\n\tpsWriter.write(G, \"output/airfoil1.eps\");\n}\n#endif\n\n\nstatic float edgeDistanceSum(Graph& G) {\n\tfloat dist = 0.0f;\n\n\tG.forEdges([&](node u, node v) {\n\t\tPoint<float> p = G.getCoordinate(u) - G.getCoordinate(v);\n\t\tdist += p.length();\n\t});\n\n\treturn dist;\n}\n\nTEST_F(VizGTest, testFRLayouter) {\n \t// create graph\n \tcount n = 80;\n \tcount numClusters = 3;\n \tdouble pin = 0.175;\n \tdouble pout = 0.005;\n\n\tClusteredRandomGraphGenerator graphGen(n, numClusters, pin, pout);\n\tGraph G = graphGen.generate();\n \tG.initCoordinates();\n \tINFO(\"Number of edges: \", G.numberOfEdges());\n\n \t// draw (independent of clustering) and write again\n \tPoint<float> bl(0.0, 0.0);\n \tPoint<float> tr(1.0, 1.0);\n\n \tFruchtermanReingold fdLayouter(bl, tr);\n \tfdLayouter.draw(G);\n \tPostscriptWriter psWriter(true);\n \tpsWriter.write(G, \"output/testForceGraph.eps\");\n\n \t// test edge distances\n \tfloat dist = edgeDistanceSum(G);\n \tfloat avg = dist / (float) G.numberOfEdges();\n \tINFO(\"avg edge length: \", avg);\n \tEXPECT_LE(avg, 0.25);\n}\n\n TEST_F(VizGTest, tryMaxentLayouter) {\n \t// create graph\n \tcount n = 80;\n \tcount numClusters = 3;\n \tdouble pin = 0.175;\n \tdouble pout = 0.005;\n\n\tClusteredRandomGraphGenerator graphGen(n, numClusters, pin, pout);\n\tGraph G = graphGen.generate();\n \tG.initCoordinates();\n \tINFO(\"Number of edges: \", G.numberOfEdges());\n\n \t// draw (independent of clustering) and write again\n \tPoint<float> bl(0.0, 0.0);\n \tPoint<float> tr(1.0, 1.0);\n\n \tMaxentStress msLayouter(bl, tr);\n \tmsLayouter.draw(G);\n \tPostscriptWriter psWriter(true);\n \tpsWriter.write(G, \"output/testMaxentGraph.eps\");\n\n \t// test edge distances\n \tfloat dist = edgeDistanceSum(G);\n \tfloat avg = dist / (float) G.numberOfEdges();\n \tDEBUG(\"avg edge length: \", avg);\n \tEXPECT_LE(avg, 0.25);\n}\n\n TEST_F(VizGTest, tryMultilevelLayouter) {\n \t// create graph\n \tcount n = 300;\n \tcount numClusters = 4;\n \tdouble pin = 0.1;\n \tdouble pout = 0.005;\n\n\tClusteredRandomGraphGenerator graphGen(n, numClusters, pin, pout);\n\tGraph G = graphGen.generate();\n \tG.initCoordinates();\n \tINFO(\"Number of edges: \", G.numberOfEdges());\n\n \tMETISGraphWriter gWriter;\n \tgWriter.write(G, \"output/testMultilevelGraph.graph\");\n\n \t// draw (independent of clustering) and write again\n \tPoint<float> bl(0.0, 0.0);\n \tPoint<float> tr(1.0, 1.0);\n\n \tMultilevelLayouter mlLayouter(bl, tr);\n \tmlLayouter.draw(G);\n 
\tPostscriptWriter psWriter4(true);\n \tpsWriter4.write(G, \"output/testMultilevelGraph.eps\");\n\n \t// test edge distances\n \tfloat dist = edgeDistanceSum(G);\n \tfloat avg = dist / (float) G.numberOfEdges();\n \tDEBUG(\"avg edge length: \", avg);\n \tEXPECT_LE(avg, 0.25);\n}\n\n TEST_F(VizGTest, tryGraphDrawing) {\n \t// create graph\n\tMETISGraphReader reader;\n\tGraph G = reader.read(\"input/lesmis.graph\");\n\n \t// draw (independent of clustering) and write again\n \tPoint<float> bl(0.0, 0.0);\n \tPoint<float> tr(1.0, 1.0);\n\n \tFruchtermanReingold fdLayouter(bl, tr);\n \tfdLayouter.draw(G);\n \tPostscriptWriter psWriter2(true);\n \tpsWriter2.write(G, \"output/testLesmisFR.eps\");\n\n \t// test edge distances\n \tfloat dist = edgeDistanceSum(G);\n \tfloat avg = dist / (float) G.numberOfEdges();\n \tINFO(\"avg edge length: \", avg);\n \tEXPECT_LE(avg, 0.25);\n\n \tMultilevelLayouter mlLayouter(bl, tr);\n \tmlLayouter.draw(G);\n \tPostscriptWriter psWriter4(true);\n \tpsWriter4.write(G, \"output/testLesmisMl.eps\");\n\n \t// test edge distances\n \tdist = edgeDistanceSum(G);\n \tavg = dist / (float) G.numberOfEdges();\n \tINFO(\"avg edge length: \", avg);\n \tEXPECT_LE(avg, 0.25);\n}\n\n\n} /* namespace NetworKit */\n\n\n#endif /*NOGTEST */\n" }, { "alpha_fraction": 0.7200621962547302, "alphanum_fraction": 0.7200621962547302, "avg_line_length": 19.74193572998047, "blob_id": "9aa49910f3e4baee83aaa965e99295ccf8d52787", "content_id": "8894a678ce9ca436f2d294a22f94f3e5ebcb965a", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1286, "license_type": "permissive", "max_line_length": 80, "num_lines": 62, "path": "/networkit/cpp/io/KONECTGraphReader.h", "repo_name": "networkitproject/networkit-mirror", "src_encoding": "UTF-8", "text": "/*\n * KONECTGraphReader.h\n *\n */\n\n#ifndef KONECTGRAPHREADER_H_\n#define KONECTGRAPHREADER_H_\n\n#include <fstream>\n#include <iostream>\n#include <string>\n#include <unordered_map>\n\n\n#include \"GraphReader.h\"\n\nnamespace NetworKit {\n\n/*\n * KONECTGraphReader.h\n * \n * Reader for the KONECT graph format, \n * based on the EdgeListReader.\n * \n * The KONECT format is described in detail in \n * http://konect.uni-koblenz.de/downloads/konect-handbook.pdf\n */\nclass KONECTGraphReader: public NetworKit::GraphReader {\n\npublic:\n\n\tKONECTGraphReader() = default; //nullary constructor for Python shell\n\n\t/**\n\t * @param[in]\tignoreLoops\tignores loops in the input graph file, if set to true\n\t * @param[in]\tseparator\tcharacter used to separate values of a line\n\t */\n\tKONECTGraphReader(char separator, bool ignoreLoops=false);\n\n\t/**\n\t * Given the path of an input file, read the graph contained.\n\t *\n\t * @param[in]\tpath\tinput file path\n\t */\n\tGraph read(const std::string& path);\n\n\nprotected:\n\tchar separator; \t//!< character separating nodes in an edge line\n\tstd::string commentPrefix;\n\tnode firstNode;\n\tbool continuous;\n//\tstd::unordered_map<index,node> mapNodeIds;\n\tbool ignoreLoops;\n\nprivate:\n\tGraph readContinuous(const std::string& path);\n\n};\n\n} /* namespace NetworKit */\n#endif /* KONECTGRAPHREADER_H_ */\n" }, { "alpha_fraction": 0.6732945442199707, "alphanum_fraction": 0.6764901876449585, "avg_line_length": 27.37657356262207, "blob_id": "9d4aa8860c18e8cfd2e6d50ad9c0dc555bbc649a", "content_id": "d0de364702a3913d8ac04d199c9939a2f9f573f4", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 45062, 
"license_type": "permissive", "max_line_length": 250, "num_lines": 1588, "path": "/networkit/cpp/graph/Graph.h", "repo_name": "networkitproject/networkit-mirror", "src_encoding": "UTF-8", "text": "/*\n * Graph.h\n *\n * Created on: 01.06.2014\n * Author: Christian Staudt ([email protected]), Klara Reichard ([email protected]), Marvin Ritter ([email protected])\n */\n\n#ifndef GRAPH_H_\n#define GRAPH_H_\n\n#include <algorithm>\n#include <vector>\n#include <stack>\n#include <queue>\n#include <utility>\n#include <stdexcept>\n#include <functional>\n#include <unordered_set>\n\n#include \"../Globals.h\"\n#include \"Coordinates.h\"\n#include \"../viz/Point.h\"\n#include \"../auxiliary/Random.h\"\n#include \"../auxiliary/FunctionTraits.h\"\n#include \"../auxiliary/Log.h\"\n\nnamespace NetworKit {\n\n\n/**\n * A weighted edge used for the graph constructor with\n * initializer list syntax.\n */\nstruct WeightedEdge {\n node u, v;\n edgeweight weight;\n\n WeightedEdge(node u, node v, edgeweight w) : u(u), v(v), weight(w) {\n }\n};\ninline bool operator<(const WeightedEdge& e1, const WeightedEdge& e2) {\n return e1.weight < e2.weight;\n}\nstruct Edge {\n node u, v;\n\n Edge(node _u, node _v, bool sorted = false) {\n if (sorted) {\n u = std::min(_u, _v);\n v = std::max(_u, _v);\n } else {\n u = _u;\n v = _v;\n }\n }\n};\ninline bool operator==(const Edge& e1, const Edge& e2) {\n return e1.u == e2.u && e1.v == e2.v;\n}\n}\n\nnamespace std {\n template<>\n struct hash<NetworKit::Edge> {\n size_t operator()(const NetworKit::Edge& e) const {\n return hash_node(e.u) ^ hash_node(e.v);\n }\n\n hash<NetworKit::node> hash_node;\n };\n}\n\nnamespace NetworKit {\n\n/**\n * @ingroup graph\n * A graph (with optional weights) and parallel iterator methods.\n */\nclass Graph final {\n\n\tfriend class ParallelPartitionCoarsening;\n\tfriend class GraphBuilder;\n\nprivate:\n\t// graph attributes\n\tcount id; //!< unique graph id, starts at 0\n\tstd::string name; //!< name of the graph, initially G#ID\n\n\t// scalars\n\tcount n; //!< current number of nodes\n\tcount m; //!< current number of edges\n\tcount storedNumberOfSelfLoops; //!< current number of self loops, edges which have the same origin and target\n\tnode z; //!< current upper bound of node ids, z will be the id of the next node\n\tedgeid omega; \t//!< current upper bound of edge ids, will be the id of the next edge\n\tcount t; //!< current time step\n\n\tbool weighted; //!< true if the graph is weighted, false otherwise\n\tbool directed; //!< true if the graph is directed, false otherwise\n\tbool edgesIndexed; //!< true if edge ids have been assigned\n\n\t// per node data\n\tstd::vector<bool> exists; //!< exists[v] is true if node v has not been removed from the graph\n\tCoordinates<float> coordinates; //!< coordinates of nodes (if present)\n\n\tstd::vector<count> inDeg; //!< only used for directed graphs, number of edges incoming per node\n\tstd::vector<count> outDeg; //!< degree of every node, zero if node was removed. 
For directed graphs only outgoing edges count\n\n\tstd::vector< std::vector<node> > inEdges; //!< only used for directed graphs, inEdges[v] contains all nodes u that have an edge (u, v)\n\tstd::vector< std::vector<node> > outEdges; //!< (outgoing) edges, for each edge (u, v) v is saved in outEdges[u] and for undirected also u in outEdges[v]\n\n\tstd::vector< std::vector<edgeweight> > inEdgeWeights; //!< only used for directed graphs, same schema as inEdges\n\tstd::vector< std::vector<edgeweight> > outEdgeWeights; //!< same schema (and same order!) as outEdges\n\n\tstd::vector< std::vector<edgeid> > inEdgeIds; //!< only used for directed graphs, same schema as inEdges\n\tstd::vector< std::vector<edgeid> > outEdgeIds; //!< same schema (and same order!) as outEdges\n\n\t/**\n\t * Returns the next unique graph id.\n\t */\n\tcount getNextGraphId();\n\n\t/**\n\t * Returns the index of node u in the array of incoming edges of node v. (for directed graphs inEdges is searched, while for undirected outEdges is searched, which gives the same result as indexInOutEdgeArray).\n\t */\n\tindex indexInInEdgeArray(node v, node u) const;\n\n\t/**\n\t * Returns the index of node v in the array of outgoing edges of node u.\n\t */\n\tindex indexInOutEdgeArray(node u, node v) const;\n\n\t/**\n\t * Returns the edge weight of the outgoing edge of index i in the outgoing edges of node u\n\t * @param u The node\n\t * @param i The index\n\t * @return The weight of the outgoing edge or defaultEdgeWeight if the graph is unweighted\n\t */\n\ttemplate<bool hasWeights>\n\tinline edgeweight getOutEdgeWeight(node u, index i) const;\n\n\t/**\n\t * Returns the edge weight of the incoming edge of index i in the incoming edges of node u\n\t *\n\t * @param u The node\n\t * @param i The index in the incoming edge array\n\t * @return The weight of the incoming edge\n\t */\n\ttemplate<bool hasWeights>\n\tinline edgeweight getInEdgeWeight(node u, index i) const;\n\n\t/**\n\t * Returns the edge id of the edge of index i in the outgoing edges of node u\n\t *\n\t * @param u The node\n\t * @param i The index in the outgoing edges\n\t * @return The edge id\n\t */\n\ttemplate<bool graphHasEdgeIds>\n\tinline edgeid getOutEdgeId(node u, index i) const;\n\n\t/**\n\t * Returns the edge id of the edge of index i in the incoming edges of node u\n\t *\n\t * @param u The node\n\t * @param i The index in the incoming edges of u\n\t * @return The edge id\n\t */\n\ttemplate<bool graphHasEdgeIds>\n\tinline edgeid getInEdgeId(node u, index i) const;\n\n\t/**\n\t * @brief Returns if the edge (u, v) shall be used in the iteration of all edges\n\t *\n\t * @param u The source node of the edge\n\t * @param v The target node of the edge\n\t * @return If the edge shall be used, i.e.
if v is not none and in the undirected case if u >= v\n\t */\n\ttemplate<bool graphIsDirected>\n\tinline bool useEdgeInIteration(node u, node v) const;\n\n\t/**\n\t * @brief Implementation of the for loop for outgoing edges of u\n\t *\n\t * Note: If all (valid) outgoing edges shall be considered, graphIsDirected needs to be set to true\n\t *\n\t * @param u The node\n\t * @param handle The handle that shall be executed for each edge\n\t * @return void\n\t */\n\ttemplate<bool graphIsDirected, bool hasWeights, bool graphHasEdgeIds, typename L>\n\tinline void forOutEdgesOfImpl(node u, L handle) const;\n\n\t/**\n\t * @brief Implementation of the for loop for incoming edges of u\n\t *\n\t * For undirected graphs, this is the same as forOutEdgesOfImpl but u and v are changed in the handle\n\t *\n\t * @param u The node\n\t * @param handle The handle that shall be executed for each edge\n\t * @return void\n\t */\n\ttemplate<bool graphIsDirected, bool hasWeights, bool graphHasEdgeIds, typename L>\n\tinline void forInEdgesOfImpl(node u, L handle) const;\n\n\t/**\n\t * @brief Implementation of the for loop for all edges, @see forEdges\n\t *\n\t * @param handle The handle that shall be executed for all edges\n\t * @return void\n\t */\n\ttemplate<bool graphIsDirected, bool hasWeights, bool graphHasEdgeIds, typename L>\n\tinline void forEdgeImpl(L handle) const;\n\n\t/**\n\t * @brief Parallel implementation of the for loop for all edges, @see parallelForEdges\n\t *\n\t * @param handle The handle that shall be executed for all edges\n\t * @return void\n\t */\n\ttemplate<bool graphIsDirected, bool hasWeights, bool graphHasEdgeIds, typename L>\n\tinline void parallelForEdgesImpl(L handle) const;\n\n\t/**\n\t * @brief Summation variant of the parallel for loop for all edges, @see parallelSumForEdges\n\t *\n\t * @param handle The handle that shall be executed for all edges\n\t * @return void\n\t */\n\ttemplate<bool graphIsDirected, bool hasWeights, bool graphHasEdgeIds, typename L>\n\tinline double parallelSumForEdgesImpl(L handle) const;\n\n\t/*\n\t * In the following definition, Aux::FunctionTraits is used in order to only execute lambda functions\n\t * with the appropriate parameters. The decltype-return type is used for determining the return type of\n\t * the lambda (needed for summation) but also determines if the lambda accepts the correct number of parameters.\n\t * Otherwise the return type declaration fails and the function is excluded from overload resolution.\n\t * Then there are multiple possible lambdas with three (third parameter id or weight) and two (second parameter\n\t * can be second node id or edge weight for neighbor iterators). This is checked using Aux::FunctionTraits and\n\t * std::enable_if. std::enable_if only defines the type member when the given bool is true; this bool comes from\n\t * std::is_same which compares two types. The function traits give either the parameter type or, if it is out of bounds,\n\t * they define type as void.\n\t */\n\n\t/**\n\t * Triggers a static assert error when no other method is chosen. Because of the use of \"...\" as arguments, the priority\n\t * of this method is lower than the priority of the other methods. This method avoids ugly and unreadable template substitution\n\t * error messages from the other declarations.\n\t */\n\ttemplate<class F, void* = (void*)0>\n\ttypename Aux::FunctionTraits<F>::result_type edgeLambda(F&f, ...) 
const {\n\t\t// the strange condition is used in order to delay the evaluation of the static assert to the moment when this function is actually used\n\t\tstatic_assert(! std::is_same<F, F>::value, \"Your lambda does not support the required parameters or the parameters have the wrong type.\");\n\t\treturn std::declval<typename Aux::FunctionTraits<F>::result_type>(); // use the correct return type (this won't compile)\n\t}\n\n\t/**\n\t * Calls the given function f if its fourth argument is of the type edgeid and third of type edgeweight\n\t * Note that the decltype check is not enough as edgeweight can be casted to node and we want to assure that the third and fourth parameters really have the types edgeweight and edgeid.\n\t */\n\ttemplate < class F,\n\t typename std::enable_if <\n\t (Aux::FunctionTraits<F>::arity >= 3) &&\n\t std::is_same<edgeweight, typename Aux::FunctionTraits<F>::template arg<2>::type>::value &&\n\t std::is_same<edgeid, typename Aux::FunctionTraits<F>::template arg<3>::type>::value\n\t >::type * = (void*)0 >\n\tauto edgeLambda(F &f, node u, node v, edgeweight ew, edgeid id) const -> decltype(f(u, v, ew, id)) {\n\t\treturn f(u, v, ew, id);\n\t}\n\n\n\t/**\n\t * Calls the given function f if its third argument is of the type edgeid, discards the edge weight\n\t * Note that the decltype check is not enough as edgeweight can be casted to node.\n\t */\n\ttemplate<class F,\n\t\t\t typename std::enable_if<\n\t\t\t (Aux::FunctionTraits<F>::arity >= 2) &&\n\t\t\t std::is_same<edgeid, typename Aux::FunctionTraits<F>::template arg<2>::type>::value &&\n\t\t\t std::is_same<node, typename Aux::FunctionTraits<F>::template arg<1>::type>::value /* prevent f(v, weight, eid) */\n\t\t\t >::type* = (void*)0>\n\tauto edgeLambda(F&f, node u, node v, edgeweight ew, edgeid id) const -> decltype(f(u, v, id)) {\n\t\treturn f(u, v, id);\n\t}\n\n\t/**\n\t * Calls the given function f if its third argument is of type edgeweight, discards the edge id\n\t * Note that the decltype check is not enough as node can be casted to edgeweight.\n\t */\n\ttemplate<class F,\n\t\t\t typename std::enable_if<\n\t\t\t (Aux::FunctionTraits<F>::arity >= 2) &&\n\t\t\t std::is_same<edgeweight, typename Aux::FunctionTraits<F>::template arg<2>::type>::value\n\t\t\t >::type* = (void*)0>\n\tauto edgeLambda(F&f, node u, node v, edgeweight ew, edgeid id) const -> decltype(f(u, v, ew)) {\n\t\treturn f(u, v, ew);\n\t}\n\n\n\t/**\n\t * Calls the given function f if it has only two arguments and the second argument is of type node,\n\t * discards edge weight and id\n\t * Note that the decltype check is not enough as edgeweight can be casted to node.\n\t */\n\ttemplate<class F,\n\t\t\t typename std::enable_if<\n\t\t\t (Aux::FunctionTraits<F>::arity >= 1) &&\n\t\t\t std::is_same<node, typename Aux::FunctionTraits<F>::template arg<1>::type>::value\n\t\t\t >::type* = (void*)0>\n\tauto edgeLambda(F&f, node u, node v, edgeweight ew, edgeid id) const -> decltype(f(u, v)) {\n\t\t\treturn f(u, v);\n\t}\n\n\t/**\n\t * Calls the given function f if it has only two arguments and the second argument is of type edgeweight,\n\t * discards the first node and the edge id\n\t * Note that the decltype check is not enough as edgeweight can be casted to node.\n\t */\n\ttemplate<class F,\n\t\t\t typename std::enable_if<\n\t\t\t (Aux::FunctionTraits<F>::arity >= 1) &&\n\t\t\t std::is_same<edgeweight, typename Aux::FunctionTraits<F>::template arg<1>::type>::value\n\t\t\t >::type* = (void*)0>\n\tauto edgeLambda(F&f, node u, node v, edgeweight ew, edgeid id) const -> decltype(f(u, ew)) {\n\t\treturn f(v, 
ew);\n\t}\n\n\n\t/**\n\t * Calls the given function f if it has only one argument, discards the first\n\t * node id, the edge weight and the edge id\n\t */\n\ttemplate<class F,\n\t\t\t void* = (void*)0>\n\tauto edgeLambda(F&f, node u, node v, edgeweight ew, edgeid id) const -> decltype(f(v)) {\n\t\treturn f(v);\n\t}\n\n\n\t/**\n\t * Calls the given BFS handle with distance parameter\n\t */\n\ttemplate <class F>\n\tauto callBFSHandle(F &f, node u, count dist) const -> decltype(f(u, dist)) {\n\t\treturn f(u, dist);\n\t}\n\n\t/**\n\t * Calls the given BFS handle without distance parameter\n\t */\n\ttemplate <class F>\n\tauto callBFSHandle(F &f, node u, count dist) const -> decltype(f(u)) {\n\t\treturn f(u);\n\t}\n\npublic:\n\n\t/**\n\t * Create a graph of @a n nodes. The graph has assignable edge weights if @a weighted is set to <code>true</code>.\n\t * If @a weighted is set to <code>false</code> each edge has edge weight 1.0 and any other weight assignment will\n\t * be ignored.\n\t * @param n Number of nodes.\n\t * @param weighted If set to <code>true</code>, the graph has edge weights.\n\t * @param directed If set to @c true, the graph will be directed.\n\t */\n\tGraph(count n = 0, bool weighted = false, bool directed = false);\n\n\tGraph(const Graph& G, bool weighted, bool directed);\n\n\t/**\n\t * Generate a weighted graph from a list of edges. (Useful for small\n\t * graphs in unit tests that you do not want to read from a file.)\n\t *\n\t * @param[in] edges list of weighted edges\n\t */\n\t Graph(std::initializer_list<WeightedEdge> edges);\n\n\n\t/**\n\t * Create a graph as copy of @a other.\n\t * @param other The graph to copy.\n\t */\n\tGraph(const Graph& other) = default;\n\n\t/** Default move constructor */\n\tGraph(Graph&& other) = default;\n\n\t/** Default destructor */\n\t~Graph() = default;\n\n\t/** Default move assignment operator */\n\tGraph& operator=(Graph&& other) = default;\n\n\t/** Default copy assignment operator */\n\tGraph& operator=(const Graph& other) = default;\n\n\t/** EDGE IDS **/\n\n\t/**\n\t* Initially assign integer edge identifiers.\n\t*\n\t* @param force Force re-indexing of edges even if they have already been indexed\n\t*/\n\tvoid indexEdges(bool force = false);\n\n\t/**\n\t* Checks if edges have been indexed\n\t*\n\t* @return true if edges have been indexed\n\t*/\n\tbool hasEdgeIds() const { return edgesIndexed; }\n\n\t/**\n\t* Get the id of the given edge.\n\t*/\n\tedgeid edgeId(node u, node v) const;\n\n\t/**\n\t* Get an upper bound for the edge ids in the graph.\n\t* @return An upper bound for the edge ids.\n\t*/\n\tindex upperEdgeIdBound() const { return omega; }\n\n\n\t/** GRAPH INFORMATION **/\n\n\t/**\n\t * Get the ID of this graph. The ID is a unique unsigned integer given to\n\t * every graph on construction.\n\t */\n\tcount getId() const { return id; }\n\n\t/**\n\t * Return the type of the graph.\n\t * \t\tGraph: not weighted, undirected\n\t * \t\tWeightedGraph: weighted, undirected\n\t * \t\tDirectedGraph: not weighted, directed\n\t * \t\tWeightedDirectedGraph: weighted, directed\n\t */\n\tstd::string typ() const;\n\n\t/**\n\t * Try to save some memory by shrinking internal data structures of the graph. Only run this\n\t * once you have finished editing the graph. Otherwise it will cause unnecessary reallocation of\n\t * memory.\n\t */\n\tvoid shrinkToFit();\n\n\t/**\n\t * Compacts the adjacency arrays by re-using no longer needed slots from deleted edges.\n\t */\n\tvoid compactEdges();\n\n\t/**\n\t * Sorts the adjacency arrays by node id. 
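A minimal usage sketch (illustrative only, not part of the original header):\n\t *\n\t * Graph G(3);\n\t * G.addEdge(0, 2);\n\t * G.addEdge(0, 1);\n\t * G.sortEdges(); // the neighbors of node 0 are now stored in the order {1, 2}\n\t *\n\t * 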
While the running time is linear this\n\t * temporarily duplicates the memory.\n\t */\n\tvoid sortEdges();\n\n\t/**\n\t * Set name of graph to @a name.\n\t * @param name The name.\n\t */\n\tvoid setName(std::string name) { this->name = name; }\n\n\t/**\n\t * Returns the name of the graph.\n\t * @return The name of the graph.\n\t */\n\tstd::string getName() const { return name; }\n\n\n\t/**\n\t * Returns a string representation of the graph.\n\t * @return A string representation.\n\t */\n\tstd::string toString() const;\n\n\n\t/* COPYING */\n\n\t/**\n\t* Copies all nodes to a new graph\n\t* @return graph with the same nodes.\n\t*/\n\tGraph copyNodes() const;\n\n\n\t/* NODE MODIFIERS */\n\n\t/**\n\t * Add a new node to the graph and return it.\n\t * @return The new node.\n\t */\n\tnode addNode();\n\n\t/**\n\t * DEPRECATED: Coordinates should be handled outside the Graph class\n\t * like general node attributes.\n\t *\n\t * Add a new node to the graph with coordinates @a x and @a y and return it.\n\t */\n\t// TODO: remove method\n\t// [[deprecated(\"Deprecated: Node coordinates should be stored externally like any other node attribute\")]]\n\tnode addNode(float x, float y);\n\n\t/**\n\t * Remove an isolated node @a v from the graph.\n\t *\n\t * @param v Node.\n\t * @note Although it would be convenient to remove all incident edges at the same time,\n\t * this causes complications for dynamic applications. Therefore, removeNode is an\n\t * atomic event. All incident edges need to be removed first and an exception is thrown\n\t * otherwise.\n\t */\n\tvoid removeNode(node v);\n\n\t/**\n\t * Check if node @a v exists in the graph.\n\t *\n\t * @param v Node.\n\t * @return @c true if @a v exists, @c false otherwise.\n\t */\n\n\tbool hasNode(node v) const { return (v < z) && this->exists[v];\t}\n\n\n\t/**\n\t * Restores a previously deleted node @a v with its previous id in the graph.\n\t *\n\t * @param v Node.\n\t *\n\t */\n\n\tvoid restoreNode(node v);\n\n\n\t// SET OPERATIONS\n\n\t/**\n\t * Appends another graph to this graph as a new subgraph. Performs node\n\t * id remapping.\n\t * @param G The graph to append.\n\t */\n\tvoid append(const Graph& G);\n\n\t/**\n\t * Modifies this graph to be the union of it and another graph.\n\t * Nodes with the same ids are identified with each other.\n\t * @param G The graph to merge into this graph.\n\t */\n\tvoid merge(const Graph& G);\n\n\n\t// SUBGRAPHS\n\n\tGraph subgraphFromNodes(const std::unordered_set<node>& nodes) const;\n\n\n\t/** NODE PROPERTIES **/\n\n\t/**\n\t * Returns the number of outgoing neighbors of @a v.\n\t *\n\t * @param v Node.\n\t * @return The number of outgoing neighbors.\n\t */\n\tcount degree(node v) const { return outDeg[v]; }\n\n\t/**\n\t * Get the number of incoming neighbors of @a v.\n\t *\n\t * @param v Node.\n\t * @return The number of incoming neighbors.\n\t * @note If the graph is not directed, the outgoing degree is returned.\n\t */\n\tcount degreeIn(node v) const { return directed ? inDeg[v] : outDeg[v]; }\n\n\t/**\n\t * Get the number of outgoing neighbors of @a v.\n\t *\n\t * @param v Node.\n\t * @return The number of outgoing neighbors.\n\t */\n\tcount degreeOut(node v) const { return outDeg[v]; }\n\n\t/**\n\t * Check whether @a v is isolated, i.e. 
degree is 0.\n\t * @param v Node.\n\t * @return @c true if the node is isolated (= degree is 0)\n\t */\n\tbool isIsolated(node v) const { return outDeg[v] == 0 && (!directed || inDeg[v] == 0); }\n\n\n\t/**\n\t * Returns the weighted degree of @a v.\n\t *\n\t * @param v Node.\n\t * @return Weighted degree of @a v.\n\t * @note For directed graphs this is the sum of weights of all outgoing edges of @a v.\n\t */\n\tedgeweight weightedDegree(node v) const;\n\n\t/**\n\t * Returns the volume of node @a v, which is the weighted degree with self-loops counted twice.\n\t *\n\t * @param v Node.\n\t * @return The volume of node @a v.\n\t */\n\tedgeweight volume(node v) const;\n\n\t/**\n\t * Returns a random node of the graph.\n\t * @return A random node.\n\t */\n\tnode randomNode() const;\n\n\t/**\n\t * Returns a random neighbor of @a u and @c none if degree is zero.\n\t *\n\t * @param u Node.\n\t * @return A random neighbor of @a u.\n\t */\n\tnode randomNeighbor(node u) const;\n\n\n\t/* EDGE MODIFIERS */\n\n\t/**\n\t * Insert an edge between the nodes @a u and @a v. If the graph is weighted you can optionally\n\t * set a weight for this edge. The default weight is 1.0.\n\t * Note: Multi-edges are not supported and will NOT be handled consistently by the graph data\n\t * structure.\n\t * @param u Endpoint of edge.\n\t * @param v Endpoint of edge.\n\t * @param ew Optional edge weight.\n\t */\n\tvoid addEdge(node u, node v, edgeweight ew = defaultEdgeWeight);\n\n\t/**\n\t * Removes the undirected edge {@a u,@a v}.\n\t * @param u Endpoint of edge.\n\t * @param v Endpoint of edge.\n\t */\n\tvoid removeEdge(node u, node v);\n\n\t/**\n\t * Removes all self-loops in the graph.\n\t */\n\tvoid removeSelfLoops();\n\n\t/**\n\t * Changes the edge {@a s1, @a t1} into {@a s1, @a t2} and the edge {@a s2, @a t2} into {@a s2, @a t1}.\n\t *\n\t * If there are edge weights or edge ids, they are preserved. Note that no check is performed if the swap is actually possible, i.e. does not generate duplicate edges.\n\t *\n\t * @param s1 The first source\n\t * @param t1 The first target\n\t * @param s2 The second source\n\t * @param t2 The second target\n\t */\n\tvoid swapEdge(NetworKit::node s1, NetworKit::node t1, NetworKit::node s2, NetworKit::node t2);\n\n\t/**\n\t * Checks if undirected edge {@a u,@a v} exists in the graph.\n\t * @param u Endpoint of edge.\n\t * @param v Endpoint of edge.\n\t * @return <code>true</code> if the edge exists, <code>false</code> otherwise.\n\t */\n\tbool hasEdge(node u, node v) const;\n\n\t/**\n\t * Returns a random edge. By default a random node u is chosen and then some random neighbor v. So the probability of choosing (u, v) highly\n\t * depends on the degree of u.\n\t * Setting uniformDistribution to true will give you a truly uniformly distributed edge, but will be very slow. So only use uniformDistribution\n\t * for single calls outside of any loops.\n\t */\n\tstd::pair<node, node> randomEdge(bool uniformDistribution = false) const;\n\n\t/**\n\t * Returns a vector with nr random edges. 
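(Hypothetical call, for illustration only: auto sample = G.randomEdges(100); yields 100 node pairs.)\n\t * 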
The edges are chosen uniformly at random.\n\t */\n\tstd::vector< std::pair<node, node> > randomEdges(count nr) const;\n\n\t/* GLOBAL PROPERTIES */\n\n\t/**\n\t * Returns <code>true</code> if this graph supports edge weights other than 1.0.\n\t * @return <code>true</code> if this graph supports edge weights other than 1.0.\n\t */\n\tbool isWeighted() const { return weighted; }\n\n\t/**\n\t * Return @c true if this graph supports directed edges.\n\t * @return @c true if this graph supports directed edges.\n\t */\n\tbool isDirected() const { return directed; }\n\n\t/**\n\t * Return <code>true</code> if graph contains no nodes.\n\t * @return <code>true</code> if graph contains no nodes.\n\t */\n\tbool isEmpty() const { return n == 0; }\n\n\t/**\n\t * Return the number of nodes in the graph.\n\t * @return The number of nodes.\n\t */\n\tcount numberOfNodes() const { return n; }\n\n\t/**\n\t * Return the number of edges in the graph.\n\t * @return The number of edges.\n\t */\n\tcount numberOfEdges() const { return m; }\n\n\n\t/**\n\t* @return a pair (n, m) where n is the number of nodes and m is the number of edges\n\t*/\n\tstd::pair<count, count> const size() { return {n, m}; };\n\n\n\t/**\n\t * @return the density of the graph\n\t */\n\tdouble density() const {\n\t\tcount n = numberOfNodes();\n\t\tcount m = numberOfEdges();\n\t\tcount loops = numberOfSelfLoops();\n\t\tm -= loops;\n\t\tdouble d;\n\t\tif (isDirected()) {\n\t\t\td = m / (double) (n * (n-1));\n\t\t} else {\n\t\t\td = (2 * m) / (double) (n * (n-1));\n\t\t}\n\t\treturn d;\n\t}\n\n\t/**\n\t * Return the number of loops {v,v} in the graph.\n\t * @return The number of loops.\n\t * @note This involves calculation, so store result if needed multiple times.\n\t */\n\tcount numberOfSelfLoops() const;\n\n \t/**\n\t * Get an upper bound for the node ids in the graph.\n\t * @return An upper bound for the node ids.\n\t */\n\tindex upperNodeIdBound() const { return z; }\n\n\t/**\n\t * Check for invalid graph states, such as multi-edges.\n\t * @return False if the graph is in an invalid state.\n\t */\n\tbool checkConsistency() const;\n\n\n\t/* DYNAMICS */\n\n\t/**\n\t * Trigger a time step - increments counter.\n\t */\n\tvoid timeStep() { t++; }\n\n\t/**\n\t * Get time step counter.\n\t * @return Time step counter.\n\t */\n\tcount time() { return t; }\n\n\n\t/* COORDINATES */\n\n\t/**\n\t * DEPRECATED: Coordinates should be handled outside the Graph class\n\t * like general node attributes.\n\t *\n\t * Sets the coordinate of @a v to @a value.\n\t *\n\t * @param v Node.\n\t * @param value The coordinate of @a v.\n\t */\n\t// TODO: remove method\n\t// [[deprecated(\"Deprecated: Node coordinates should be stored externally like any other node attribute\")]]\n\tvoid setCoordinate(node v, Point<float> value) { coordinates.setCoordinate(v, value); }\n\n\n\t/**\n\t * DEPRECATED: Coordinates should be handled outside the Graph class\n\t * like general node attributes.\n\t *\n\t * Get the coordinate of @a v.\n\t * @param v Node.\n\t * @return The coordinate of @a v.\n\t */\n\t// TODO: remove method\n\t// [[deprecated(\"Deprecated: Node coordinates should be stored externally like any other node attribute\")]]\n\tPoint<float>& getCoordinate(node v) { return coordinates.getCoordinate(v); }\n\n\t/**\n\t * DEPRECATED: Coordinates should be handled outside the Graph class\n\t * like general node attributes.\n\t *\n\t * Get minimum coordinate of all coordinates with respect to dimension @a dim.\n\t * @param dim The dimension to search for minimum.\n\t * @return The 
minimum coordinate in dimension @a dim.\n\t */\n\t// TODO: remove method\n\t// [[deprecated(\"Deprecated: Node coordinates should be stored externally like any other node attribute\")]]\n\tfloat minCoordinate(count dim) { return coordinates.minCoordinate(dim); }\n\n\t/**\n\t * DEPRECATED: Coordinates should be handled outside the Graph class\n\t * like general node attributes.\n\t *\n\t * Get maximum coordinate of all coordinates with respect to dimension @a dim.\n\t * @param dim The dimension to search for maximum.\n\t * @return The maximum coordinate in dimension @a dim.\n\t */\n\t// TODO: remove method\n\t// [[deprecated(\"Deprecated: Node coordinates should be stored externally like any other node attribute\")]]\n\tfloat maxCoordinate(count dim) { return coordinates.maxCoordinate(dim); }\n\n\t/**\n\t * DEPRECATED: Coordinates should be handled outside the Graph class\n\t * like general node attributes.\n\t *\n\t * Initializes the coordinates for the nodes in graph.\n\t * @note This has to be called once and before you set coordinates. Call this method again if new nodes have\n\t * been added.\n\t */\n\t// TODO: remove method\n\t// [[deprecated(\"Deprecated: Node coordinates should be stored externally like any other node attribute\")]]\n\tvoid initCoordinates() { coordinates.init(z); }\n\n\n\t/* EDGE ATTRIBUTES */\n\n\t/**\n\t * Return edge weight of edge {@a u,@a v}. Returns 0 if edge does not exist.\n\t * BEWARE: Running time is \\Theta(deg(u))!\n\t *\n\t * @param u Endpoint of edge.\n\t * @param v Endpoint of edge.\n\t * @return Edge weight of edge {@a u,@a v} or 0 if edge does not exist.\n\t */\n\tedgeweight weight(node u, node v) const;\n\n\t/**\n\t * Set the weight of an edge. If the edge does not exist,\n\t * it will be inserted.\n\t *\n\t * @param[in]\tu\tendpoint of edge\n\t * @param[in]\tv\tendpoint of edge\n\t * @param[in]\tweight\tedge weight\n\t */\n\tvoid setWeight(node u, node v, edgeweight ew);\n\n\t/**\n\t * Increase the weight of an edge. If the edge does not exist,\n\t * it will be inserted.\n\t *\n\t * @param[in]\tu\tendpoint of edge\n\t * @param[in]\tv\tendpoint of edge\n\t * @param[in]\tweight\tedge weight\n\t */\n\tvoid increaseWeight(node u, node v, edgeweight ew);\n\n\n\n\t/* SUMS */\n\n\t/**\n\t * Returns the sum of all edge weights.\n\t * @return The sum of all edge weights.\n\t */\n\tedgeweight totalEdgeWeight() const;\n\n\n\t/* Collections */\n\n\t/**\n\t * Get list of all nodes.\n\t * @return List of all nodes.\n\t */\n\tstd::vector<node> nodes() const;\n\n\t/**\n\t * Get list of edges as node pairs.\n\t * @return List of edges as node pairs.\n\t */\n\tstd::vector<std::pair<node, node> > edges() const;\n\n\t/**\n\t * Get list of neighbors of @a u.\n\t *\n\t * @param u Node.\n\t * @return List of neighbors of @a u.\n\t */\n\tstd::vector<node> neighbors(node u) const;\n\n\n\t/* Derivative Graphs */\n\n\t/**\n\t* Return an undirected version of this graph.\n\t*\n\t* @return undirected graph.\n\t*/\n\tGraph toUndirected() const;\n\n\n\t/**\n\t* Return an unweighted version of this graph.\n\t*\n\t* @return unweighted graph.\n\t*/\n\tGraph toUnweighted() const;\n\n\t/**\n\t * Return the transpose of this graph. 
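A minimal sketch (illustrative only, identifiers chosen for the example):\n\t *\n\t * Graph G(2, false, true); // two nodes, unweighted, directed\n\t * G.addEdge(0, 1);\n\t * Graph GT = G.transpose(); // GT contains the reversed edge (1, 0)\n\t *\n\t * 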
The graph must be directed.\n\t *\n\t * @return transpose of the graph.\n\t */\n\tGraph transpose() const;\n\n\t/* NODE ITERATORS */\n\n\t/**\n\t * Iterate over all nodes of the graph and call @a handle (lambda closure).\n\t *\n\t * @param handle Takes parameter <code>(node)</code>.\n\t */\n\ttemplate<typename L> void forNodes(L handle) const;\n\n\t/**\n\t * Iterate in parallel over all nodes of the graph and call @a handle (lambda closure).\n\t *\n\t * @param handle Takes parameter <code>(node)</code>.\n\t */\n\ttemplate<typename L> void parallelForNodes(L handle) const;\n\n\t/** Iterate over all nodes of the graph and call @a handle (lambda closure) as long as @a condition remains true.\n\t * This allows for breaking from a node loop.\n\t *\n\t * @param condition Returning <code>false</code> breaks the loop.\n\t * @param handle Takes parameter <code>(node)</code>.\n\t */\n\ttemplate<typename C, typename L> void forNodesWhile(C condition, L handle) const;\n\n\t/**\n\t * Iterate randomly over all nodes of the graph and call @a handle (lambda closure).\n\t *\n\t * @param handle Takes parameter <code>(node)</code>.\n\t */\n\ttemplate<typename L> void forNodesInRandomOrder(L handle) const;\n\n\t/**\n\t * Iterate in parallel over all nodes of the graph and call handler (lambda closure).\n\t * Using schedule(guided) to remedy load-imbalances due to e.g. unequal degree distribution.\n\t *\n\t * @param handle Takes parameter <code>(node)</code>.\n\t */\n\ttemplate<typename L> void balancedParallelForNodes(L handle) const;\n\n\n\t/**\n\t * Iterate over all undirected pairs of nodes and call @a handle (lambda closure).\n\t *\n\t * @param handle Takes parameters <code>(node, node)</code>.\n\t */\n\ttemplate<typename L> void forNodePairs(L handle) const;\n\n\n\t/**\n\t * Iterate over all undirected pairs of nodes in parallel and call @a handle (lambda closure).\n\t *\n\t * @param handle Takes parameters <code>(node, node)</code>.\n\t */\n\ttemplate<typename L> void parallelForNodePairs(L handle) const;\n\n\n\t/* EDGE ITERATORS */\n\n\t/**\n\t * Iterate over all edges of the const graph and call @a handle (lambda closure).\n\t *\n\t * @param handle Takes parameters <code>(node, node)</code>, <code>(node, node, edgeweight)</code>, <code>(node, node, edgeid)</code> or <code>(node, node, edgeweight, edgeid)</code>.\n\t */\n\ttemplate<typename L> void forEdges(L handle) const;\n\n\t/**\n\t * Iterate in parallel over all edges of the const graph and call @a handle (lambda closure).\n\t *\n\t * @param handle Takes parameters <code>(node, node)</code> or <code>(node, node, edgeweight)</code>, <code>(node, node, edgeid)</code> or <code>(node, node, edgeweight, edgeid)</code>.\n\t */\n\ttemplate<typename L> void parallelForEdges(L handle) const;\n\n\n\t/* NEIGHBORHOOD ITERATORS */\n\n\t/**\n\t * Iterate over all neighbors of a node and call @a handle (lambda closure).\n\t *\n\t * @param u Node.\n\t * @param handle Takes parameter <code>(node)</code> or <code>(node, edgeweight)</code> which is a neighbor of @a u.\n\t * @note For directed graphs only outgoing edges from @a u are considered.\n\t * A node is its own neighbor if there is a self-loop.\n\t *\n\t */\n\ttemplate<typename L> void forNeighborsOf(node u, L handle) const;\n\n\t/**\n\t * Iterate over all incident edges of a node and call @a handle (lambda closure).\n\t *\n\t * @param u Node.\n\t * @param handle Takes parameters <code>(node, node)</code>, <code>(node, node, edgeweight)</code>, <code>(node, node, edgeid)</code> or <code>(node, node, edgeweight, 
edgeid)</code> where the first node is @a u and the second is a neighbor of @a u.\n\t * @note For undirected graphs all edges incident to @a u are also outgoing edges.\n\t */\n\ttemplate<typename L> void forEdgesOf(node u, L handle) const;\n\n\t/**\n\t * Iterate over all neighbors of a node and call handler (lambda closure).\n\t * For directed graphs only incoming edges of u are considered.\n\t */\n\ttemplate<typename L> void forInNeighborsOf(node u, L handle) const;\n\n\t/**\n\t * Iterate over all incoming edges of a node and call handler (lambda closure).\n\t * @note For undirected graphs all edges incident to u are also incoming edges.\n\t *\n\t * Handle takes parameters (u, v) or (u, v, w) where w is the edge weight.\n\t */\n\ttemplate<typename L> void forInEdgesOf(node u, L handle) const;\n\n\t/* REDUCTION ITERATORS */\n\n\t/**\n\t * Iterate in parallel over all nodes and sum (reduce +) the values returned by the handler\n\t */\n\ttemplate<typename L> double parallelSumForNodes(L handle) const;\n\n\t/**\n\t * Iterate in parallel over all edges and sum (reduce +) the values returned by the handler\n\t */\n\ttemplate<typename L> double parallelSumForEdges(L handle) const;\n\n\n\t/* GRAPH SEARCHES */\n\n\t/**\n\t * Iterate over nodes in breadth-first search order starting from r until connected component\n\t * of r has been visited.\n\t *\n\t * @param r Node.\n\t * @param handle Takes parameter <code>(node)</code>.\n\t */\n\ttemplate<typename L> void BFSfrom(node r, L handle) const;\n\ttemplate<typename L> void BFSfrom(const std::vector<node> &startNodes, L handle) const;\n\n\ttemplate<typename L> void BFSEdgesFrom(node r, L handle) const;\n\n\t/**\n\t * Iterate over nodes in depth-first search order starting from r until connected component\n\t * of r has been visited.\n\t *\n\t * @param r Node.\n\t * @param handle Takes parameter <code>(node)</code>.\n\t */\n\ttemplate<typename L> void DFSfrom(node r, L handle) const;\n\n\n\ttemplate<typename L> void DFSEdgesFrom(node r, L handle) const;\n};\n\n/* NODE ITERATORS */\n\ntemplate<typename L>\nvoid Graph::forNodes(L handle) const {\n\tfor (node v = 0; v < z; ++v) {\n\t\tif (exists[v]) {\n\t\t\thandle(v);\n\t\t}\n\t}\n}\n\ntemplate<typename L>\nvoid Graph::parallelForNodes(L handle) const {\n\t#pragma omp parallel for\n\tfor (node v = 0; v < z; ++v) {\n\t\tif (exists[v]) {\n\t\t\thandle(v);\n\t\t}\n\t}\n}\n\ntemplate<typename C, typename L>\nvoid Graph::forNodesWhile(C condition, L handle) const {\n\tfor (node v = 0; v < z; ++v) {\n\t\tif (exists[v]) {\n\t\t\tif (!condition()) {\n\t\t\t\tbreak;\n\t\t\t}\n\t\t\thandle(v);\n\t\t}\n\t}\n}\n\ntemplate<typename L>\nvoid Graph::forNodesInRandomOrder(L handle) const {\n\tstd::vector<node> randVec = nodes();\n\tstd::shuffle(randVec.begin(), randVec.end(), Aux::Random::getURNG());\n\tfor (node v : randVec) {\n\t\thandle(v);\n\t}\n}\n\ntemplate<typename L>\nvoid Graph::balancedParallelForNodes(L handle) const {\n\t#pragma omp parallel for schedule(guided) // TODO: define min block size (and test it!)\n\tfor (node v = 0; v < z; ++v) {\n\t\tif (exists[v]) {\n\t\t\thandle(v);\n\t\t}\n\t}\n}\n\ntemplate<typename L>\nvoid Graph::forNodePairs(L handle) const {\n\tfor (node u = 0; u < z; ++u) {\n\t\tif (exists[u]) {\n\t\t\tfor (node v = u + 1; v < z; ++v) {\n\t\t\t\tif (exists[v]) {\n\t\t\t\t\thandle(u, v);\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}\n\ntemplate<typename L>\nvoid Graph::parallelForNodePairs(L handle) const {\n\t#pragma omp parallel for schedule(guided)\n\tfor (node u = 0; u < z; ++u) {\n\t\tif 
(exists[u]) {\n\t\t\tfor (node v = u + 1; v < z; ++v) {\n\t\t\t\tif (exists[v]) {\n\t\t\t\t\thandle(u, v);\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}\n\n\n/* EDGE ITERATORS */\n\n/* HELPERS */\n\ntemplate<bool hasWeights> // implementation for weighted == true\ninline edgeweight Graph::getOutEdgeWeight(node u, index i) const {\n\treturn outEdgeWeights[u][i];\n}\n\ntemplate<> // implementation for weighted == false\ninline edgeweight Graph::getOutEdgeWeight<false>(node, index) const {\n\treturn defaultEdgeWeight;\n}\n\ntemplate<bool hasWeights> // implementation for weighted == true\ninline edgeweight Graph::getInEdgeWeight(node u, index i) const {\n\treturn inEdgeWeights[u][i];\n}\n\ntemplate<> // implementation for weighted == false\ninline edgeweight Graph::getInEdgeWeight<false>(node, index) const {\n\treturn defaultEdgeWeight;\n}\n\n\ntemplate<bool graphHasEdgeIds> // implementation for hasEdgeIds == true\ninline edgeid Graph::getOutEdgeId(node u, index i) const {\n\treturn outEdgeIds[u][i];\n}\n\ntemplate<> // implementation for hasEdgeIds == false\ninline edgeid Graph::getOutEdgeId<false>(node, index) const {\n\treturn 0;\n}\n\ntemplate<bool graphHasEdgeIds> // implementation for hasEdgeIds == true\ninline edgeid Graph::getInEdgeId(node u, index i) const {\n\treturn inEdgeIds[u][i];\n}\n\ntemplate<> // implementation for hasEdgeIds == false\ninline edgeid Graph::getInEdgeId<false>(node, index) const {\n\treturn 0;\n}\n\n\ntemplate<bool graphIsDirected> // implementation for graphIsDirected == true\ninline bool Graph::useEdgeInIteration(node u, node v) const {\n\treturn v != none;\n}\n\ntemplate<> // implementation for graphIsDirected == false\ninline bool Graph::useEdgeInIteration<false>(node u, node v) const {\n\treturn u >= v;\n}\n\ntemplate<bool graphIsDirected, bool hasWeights, bool graphHasEdgeIds, typename L>\ninline void Graph::forOutEdgesOfImpl(node u, L handle) const {\n\tfor (index i = 0; i < outEdges[u].size(); ++i) {\n\t\tnode v = outEdges[u][i];\n\n\t\tif (useEdgeInIteration<graphIsDirected>(u, v)) {\n\t\t\tedgeLambda<L>(handle, u, v, getOutEdgeWeight<hasWeights>(u, i), getOutEdgeId<graphHasEdgeIds>(u, i));\n\t\t}\n\t}\n}\n\ntemplate<bool graphIsDirected, bool hasWeights, bool graphHasEdgeIds, typename L>\ninline void Graph::forInEdgesOfImpl(node u, L handle) const {\n\tif (graphIsDirected) {\n\t\tfor (index i = 0; i < inEdges[u].size(); i++) {\n\t\t\tnode v = inEdges[u][i];\n\n\t\t\tif (useEdgeInIteration<true>(u, v)) {\n\t\t\t\tedgeLambda<L>(handle, u, v, getInEdgeWeight<hasWeights>(u, i), getInEdgeId<graphHasEdgeIds>(u, i));\n\t\t\t}\n\t\t}\n\t} else {\n\t\tfor (index i = 0; i < outEdges[u].size(); ++i) {\n\t\t\tnode v = outEdges[u][i];\n\n\t\t\tif (useEdgeInIteration<true>(u, v)) {\n\t\t\t\tedgeLambda<L>(handle, u, v, getOutEdgeWeight<hasWeights>(u, i), getOutEdgeId<graphHasEdgeIds>(u, i));\n\t\t\t}\n\t\t}\n\t}\n}\n\ntemplate<bool graphIsDirected, bool hasWeights, bool graphHasEdgeIds, typename L>\ninline void Graph::forEdgeImpl(L handle) const {\n\tfor (node u = 0; u < z; ++u) {\n\t\tforOutEdgesOfImpl<graphIsDirected, hasWeights, graphHasEdgeIds, L>(u, handle);\n\t}\n}\n\ntemplate<bool graphIsDirected, bool hasWeights, bool graphHasEdgeIds, typename L>\ninline void Graph::parallelForEdgesImpl(L handle) const {\n\t#pragma omp parallel for schedule(guided)\n\tfor (node u = 0; u < z; ++u) {\n\t\tforOutEdgesOfImpl<graphIsDirected, hasWeights, graphHasEdgeIds, L>(u, handle);\n\t}\n}\n\ntemplate<bool graphIsDirected, bool hasWeights, bool graphHasEdgeIds, typename L>\ninline 
double Graph::parallelSumForEdgesImpl(L handle) const {\n\tdouble sum = 0.0;\n\n\t#pragma omp parallel for reduction(+:sum)\n\n\tfor (node u = 0; u < z; ++u) {\n\t\tfor (index i = 0; i < outEdges[u].size(); ++i) {\n\t\t\tnode v = outEdges[u][i];\n\n\t\t\t// undirected, do not iterate over edges twice\n\t\t\t// {u, v} instead of (u, v); if v == none, u > v is not fulfilled\n\t\t\tif (useEdgeInIteration<graphIsDirected>(u, v)) {\n\t\t\t\tsum += edgeLambda<L>(handle, u, v, getOutEdgeWeight<hasWeights>(u, i), getOutEdgeId<graphHasEdgeIds>(u, i));\n\t\t\t}\n\t\t}\n\t}\n\n\treturn sum;\n}\n\ntemplate<typename L>\nvoid Graph::forEdges(L handle) const {\n\tswitch (weighted + 2 * directed + 4 * edgesIndexed) {\n\tcase 0: // unweighted, undirected, no edgeIds\n\t\tforEdgeImpl<false, false, false, L>(handle);\n\t\tbreak;\n\n\tcase 1: // weighted, undirected, no edgeIds\n\t\tforEdgeImpl<false, true, false, L>(handle);\n\t\tbreak;\n\n\tcase 2: // unweighted, directed, no edgeIds\n\t\tforEdgeImpl<true, false, false, L>(handle);\n\t\tbreak;\n\n\tcase 3: // weighted, directed, no edgeIds\n\t\tforEdgeImpl<true, true, false, L>(handle);\n\t\tbreak;\n\n\tcase 4: // unweighted, undirected, with edgeIds\n\t\tforEdgeImpl<false, false, true, L>(handle);\n\t\tbreak;\n\n\tcase 5: // weighted, undirected, with edgeIds\n\t\tforEdgeImpl<false, true, true, L>(handle);\n\t\tbreak;\n\n\tcase 6: // unweighted, directed, with edgeIds\n\t\tforEdgeImpl<true, false, true, L>(handle);\n\t\tbreak;\n\n\tcase 7: // weighted, directed, with edgeIds\n\t\tforEdgeImpl<true, true, true, L>(handle);\n\t\tbreak;\n\t}\n}\n\n\ntemplate<typename L>\nvoid Graph::parallelForEdges(L handle) const {\n\tswitch (weighted + 2 * directed + 4 * edgesIndexed) {\n\tcase 0: // unweighted, undirected, no edgeIds\n\t\tparallelForEdgesImpl<false, false, false, L>(handle);\n\t\tbreak;\n\n\tcase 1: // weighted, undirected, no edgeIds\n\t\tparallelForEdgesImpl<false, true, false, L>(handle);\n\t\tbreak;\n\n\tcase 2: // unweighted, directed, no edgeIds\n\t\tparallelForEdgesImpl<true, false, false, L>(handle);\n\t\tbreak;\n\n\tcase 3: // weighted, directed, no edgeIds\n\t\tparallelForEdgesImpl<true, true, false, L>(handle);\n\t\tbreak;\n\n\tcase 4: // unweighted, undirected, with edgeIds\n\t\tparallelForEdgesImpl<false, false, true, L>(handle);\n\t\tbreak;\n\n\tcase 5: // weighted, undirected, with edgeIds\n\t\tparallelForEdgesImpl<false, true, true, L>(handle);\n\t\tbreak;\n\n\tcase 6: // unweighted, directed, with edgeIds\n\t\tparallelForEdgesImpl<true, false, true, L>(handle);\n\t\tbreak;\n\n\tcase 7: // weighted, directed, with edgeIds\n\t\tparallelForEdgesImpl<true, true, true, L>(handle);\n\t\tbreak;\n\t}\n}\n\n\n\n/* NEIGHBORHOOD ITERATORS */\n\ntemplate<typename L>\nvoid Graph::forNeighborsOf(node u, L handle) const {\n\tforEdgesOf(u, handle);\n}\n\ntemplate<typename L>\nvoid Graph::forEdgesOf(node u, L handle) const {\n\tswitch (weighted + 2 * edgesIndexed) {\n\tcase 0: //not weighted, no edge ids\n\t\tforOutEdgesOfImpl<true, false, false, L>(u, handle);\n\t\tbreak;\n\n\tcase 1:\t//weighted, no edge ids\n\t\tforOutEdgesOfImpl<true, true, false, L>(u, handle);\n\t\tbreak;\n\n\tcase 2: //not weighted, with edge ids\n\t\tforOutEdgesOfImpl<true, false, true, L>(u, handle);\n\t\tbreak;\n\n\tcase 3:\t//weighted, with edge ids\n\t\tforOutEdgesOfImpl<true, true, true, L>(u, handle);\n\t\tbreak;\n\t}\n}\n\ntemplate<typename L>\nvoid Graph::forInNeighborsOf(node u, L handle) const {\n\tforInEdgesOf(u, handle);\n}\n\ntemplate<typename L>\nvoid 
Graph::forInEdgesOf(node u, L handle) const {\n\tswitch (weighted + 2 * directed + 4 * edgesIndexed) {\n\tcase 0: //unweighted, undirected, no edge ids\n\t\tforInEdgesOfImpl<false, false, false, L>(u, handle);\n\t\tbreak;\n\n\tcase 1: //weighted, undirected, no edge ids\n\t\tforInEdgesOfImpl<false, true, false, L>(u, handle);\n\t\tbreak;\n\n\tcase 2: //unweighted, directed, no edge ids\n\t\tforInEdgesOfImpl<true, false, false, L>(u, handle);\n\t\tbreak;\n\n\tcase 3: //weighted, directed, no edge ids\n\t\tforInEdgesOfImpl<true, true, false, L>(u, handle);\n\t\tbreak;\n\n\tcase 4: //unweighted, undirected, with edge ids\n\t\tforInEdgesOfImpl<false, false, true, L>(u, handle);\n\t\tbreak;\n\n\tcase 5: //weighted, undirected, with edge ids\n\t\tforInEdgesOfImpl<false, true, true, L>(u, handle);\n\t\tbreak;\n\n\tcase 6: //unweighted, directed, with edge ids\n\t\tforInEdgesOfImpl<true, false, true, L>(u, handle);\n\t\tbreak;\n\n\tcase 7: //weighted, directed, with edge ids\n\t\tforInEdgesOfImpl<true, true, true, L>(u, handle);\n\t\tbreak;\n\t}\n}\n\n/* REDUCTION ITERATORS */\n\ntemplate<typename L>\ndouble Graph::parallelSumForNodes(L handle) const {\n\tdouble sum = 0.0;\n\t#pragma omp parallel for reduction(+:sum)\n\n\tfor (node v = 0; v < z; ++v) {\n\t\tif (exists[v]) {\n\t\t\tsum += handle(v);\n\t\t}\n\t}\n\n\treturn sum;\n}\n\ntemplate<typename L>\ndouble Graph::parallelSumForEdges(L handle) const {\n\tdouble sum = 0.0;\n\n\tswitch (weighted + 2 * directed + 4 * edgesIndexed) {\n\tcase 0: // unweighted, undirected, no edge ids\n\t\tsum = parallelSumForEdgesImpl<false, false, false, L>(handle);\n\t\tbreak;\n\n\tcase 1: // weighted, undirected, no edge ids\n\t\tsum = parallelSumForEdgesImpl<false, true, false, L>(handle);\n\t\tbreak;\n\n\tcase 2: // unweighted, directed, no edge ids\n\t\tsum = parallelSumForEdgesImpl<true, false, false, L>(handle);\n\t\tbreak;\n\n\tcase 3: // weighted, directed, no edge ids\n\t\tsum = parallelSumForEdgesImpl<true, true, false, L>(handle);\n\t\tbreak;\n\n\tcase 4: // unweighted, undirected, with edge ids\n\t\tsum = parallelSumForEdgesImpl<false, false, true, L>(handle);\n\t\tbreak;\n\n\tcase 5: // weighted, undirected, with edge ids\n\t\tsum = parallelSumForEdgesImpl<false, true, true, L>(handle);\n\t\tbreak;\n\n\tcase 6: // unweighted, directed, with edge ids\n\t\tsum = parallelSumForEdgesImpl<true, false, true, L>(handle);\n\t\tbreak;\n\n\tcase 7: // weighted, directed, with edge ids\n\t\tsum = parallelSumForEdgesImpl<true, true, true, L>(handle);\n\t\tbreak;\n\t}\n\n\treturn sum;\n}\n\n\n/* GRAPH SEARCHES */\n\ntemplate<typename L>\nvoid Graph::BFSfrom(node r, L handle) const {\n\tstd::vector<node> startNodes(1, r);\n\tBFSfrom(startNodes, handle);\n}\n\ntemplate<typename L>\nvoid Graph::BFSfrom(const std::vector<node> &startNodes, L handle) const {\n\tstd::vector<bool> marked(z);\n\tstd::queue<node> q, qNext;\n\tcount dist = 0;\n\t// enqueue start nodes\n\tfor (node u : startNodes) {\n\t\tq.push(u);\n\t\tmarked[u] = true;\n\t}\n\tdo {\n\t\tnode u = q.front();\n\t\tq.pop();\n\t\t// apply function\n\t\tcallBFSHandle(handle, u, dist);\n\t\tforNeighborsOf(u, [&](node v) {\n\t\t\tif (!marked[v]) {\n\t\t\t\tqNext.push(v);\n\t\t\t\tmarked[v] = true;\n\t\t\t}\n\t\t});\n\t\tif (q.empty() && !qNext.empty()) {\n\t\t\tq.swap(qNext);\n\t\t\t++dist;\n\t\t}\n\t} while (!q.empty());\n}\n\ntemplate<typename L>\nvoid Graph::BFSEdgesFrom(node r, L handle) const {\n\tstd::vector<bool> marked(z);\n\tstd::queue<node> q;\n\tq.push(r); // enqueue root\n\tmarked[r] = true;\n\tdo 
{\n\t\tnode u = q.front();\n\t\tq.pop();\n\t\t// apply function\n\t\tforNeighborsOf(u, [&](node, node v, edgeweight w, edgeid eid) {\n\t\t\tif (!marked[v]) {\n\t\t\t\thandle(u, v, w, eid);\n\t\t\t\tq.push(v);\n\t\t\t\tmarked[v] = true;\n\t\t\t}\n\t\t});\n\t} while (!q.empty());\n}\n\ntemplate<typename L>\nvoid Graph::DFSfrom(node r, L handle) const {\n\tstd::vector<bool> marked(z);\n\tstd::stack<node> s;\n\ts.push(r); // enqueue root\n\tmarked[r] = true;\n\tdo {\n\t\tnode u = s.top();\n\t\ts.pop();\n\t\t// apply function\n\t\thandle(u);\n\t\tforNeighborsOf(u, [&](node v) {\n\t\t\tif (!marked[v]) {\n\t\t\t\ts.push(v);\n\t\t\t\tmarked[v] = true;\n\t\t\t}\n\t\t});\n\t} while (!s.empty());\n}\n\ntemplate<typename L>\nvoid Graph::DFSEdgesFrom(node r, L handle) const {\n\tstd::vector<bool> marked(z);\n\tstd::stack<node> s;\n\ts.push(r); // enqueue root\n\tmarked[r] = true;\n\tdo {\n\t\tnode u = s.top();\n\t\ts.pop();\n\t\t// apply function\n\t\tforNeighborsOf(u, [&](node v) {\n\t\t\tif (!marked[v]) {\n\t\t\t\thandle(u, v);\n\t\t\t\ts.push(v);\n\t\t\t\tmarked[v] = true;\n\t\t\t}\n\t\t});\n\t} while (!s.empty());\n}\n\n\n\n\n} /* namespace NetworKit */\n\n#endif /* GRAPH_H_ */\n" }, { "alpha_fraction": 0.5007004737854004, "alphanum_fraction": 0.5508545637130737, "avg_line_length": 17.396907806396484, "blob_id": "d1fc2cca97c749663ee339d79561fbb703c04a6b", "content_id": "30686a28bbbe7fa8b98ba7279648b3a998b056c4", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 3569, "license_type": "permissive", "max_line_length": 66, "num_lines": 194, "path": "/networkit/cpp/graph/test/SSSPGTest.cpp", "repo_name": "networkitproject/networkit-mirror", "src_encoding": "UTF-8", "text": "/*\n * SSSPGTest.cpp\n *\n * Created on: 21.07.2014\n * Author: ebergamini\n */\n\n#include \"SSSPGTest.h\"\n#include \"../DynBFS.h\"\n#include \"../BFS.h\"\n#include \"../DynDijkstra.h\"\n#include \"../Dijkstra.h\"\n#include \"../../io/METISGraphReader.h\"\n#include \"../../auxiliary/Log.h\"\n\n#include <stack>\n\n\nnamespace NetworKit {\n\nTEST_F(SSSPGTest, testDijkstra) {\n/* Graph:\n ______\n\t\t/ \\\n\t 0 3 6\n\t\t\\ / \\ /\n\t\t 2 5\n\t\t/ \\ / \\\n\t 1 4 7\n*/\n\tint n = 8;\n\tGraph G(n, true);\n\n\tG.addEdge(0, 2);\n\tG.addEdge(1, 2);\n\tG.addEdge(2, 3);\n\tG.addEdge(2, 4);\n\tG.addEdge(3, 5);\n\tG.addEdge(4, 5);\n\tG.addEdge(5, 6);\n\tG.addEdge(5, 7);\n\tG.addEdge(0, 6);\n\n\n\tDijkstra sssp(G, 5, true, true);\n\tsssp.run();\n\tstd::vector<node> stack = sssp.getStack();\n#if LOG_LEVEL >= LOG_LEVEL_DEBUG\n\twhile (!stack.empty()) {\n\t\tDEBUG(stack.back());\n\t\tstack.pop_back();\n\t}\n#endif\n}\n\nTEST_F(SSSPGTest, testShortestPaths) {\n\tMETISGraphReader reader;\n\tGraph G = reader.read(\"input/PGPgiantcompo.graph\");\n\tINFO(\"The graph has been read.\");\n\tint source = 2;\n\tBFS bfs(G, source);\n\tbfs.run();\n\tbigfloat max = 0;\n\tnode x;\n\tG.forNodes([&](node n){\n\t\tif(bfs.numberOfPaths(n) > max) {\n\t\t\tmax = bfs.numberOfPaths(n);\n\t\t\tx = n;\n\t\t}\n\t});\n\tcount dist = bfs.distance(x);\n\tstd::set<std::vector<node>> paths = bfs.getPaths(x, true);\n\tcount i = 0;\n\tfor (auto path : paths) {\n\t\tINFO(\"Path number \", i);\n\t\ti ++;\n\t\tINFO(path);\n\t\tEXPECT_EQ(path[0], source);\n\t\tEXPECT_EQ(path[dist], x);\n\t}\n\tINFO(\"Maximum number of shortest paths: \", bfs.numberOfPaths(x));\n\tINFO(\"Distance: \", dist);\n}\n\nTEST_F(SSSPGTest, testGetAllShortestPaths) {\n/* Graph:\n\n\t 0 3 6 9\n\t\t\\ / \\ / \\ /\n 2 5 8\n\t\t/ \\ / \\ / \\\n\t 1 4 7 
10\n*/\n\tint n = 11;\n\tGraph G(n, false);\n\tG.addEdge(0, 2);\n\tG.addEdge(1, 2);\n\tG.addEdge(2, 3);\n\tG.addEdge(2, 4);\n\tG.addEdge(3, 5);\n\tG.addEdge(4, 5);\n\tG.addEdge(5, 6);\n\tG.addEdge(5, 7);\n\tG.addEdge(6, 8);\n\tG.addEdge(7, 8);\n\tG.addEdge(8, 9);\n\tG.addEdge(8, 10);\n\tDijkstra sssp(G, 0, true, false);\n\tsssp.run();\n\tstd::set<std::vector<node>> paths = sssp.getPaths(9, true);\n\tcount i = 0;\n\tfor (auto path : paths) {\n\t\tINFO(\"Path number \", i);\n\t\ti ++;\n\t\tfor (node n : path) {\n\t\t\tINFO(n);\n\t\t}\n\t}\n}\n\nTEST_F(SSSPGTest, testDirectedBFS) {\n/* Graph:\n ________\n\t\t/ \\.\n\t 0 3. 6\n\t\t\\. ./ \\ ./\n\t\t 2 .5\n\t\t./ \\. / \\.\n\t 1 4 7\n*/\n\tint n = 8;\n\t// G directed unweighted\n\tGraph G(n, false, true);\n\n\tG.addEdge(0, 6);\n\tG.addEdge(0, 2);\n\tG.addEdge(3, 2);\n\tG.addEdge(5, 3);\n\tG.addEdge(6, 5);\n\tG.addEdge(5, 7);\n\tG.addEdge(4, 5);\n\tG.addEdge(2, 4);\n\tG.addEdge(2, 1);\n\n\n\tBFS sssp(G, 0);\n\tsssp.run();\n\tEXPECT_EQ(sssp.distance(0), 0);\n\tEXPECT_EQ(sssp.distance(1), 2);\n\tEXPECT_EQ(sssp.distance(2), 1);\n\tEXPECT_EQ(sssp.distance(3), 3);\n\tEXPECT_EQ(sssp.distance(4), 2);\n\tEXPECT_EQ(sssp.distance(5), 2);\n\tEXPECT_EQ(sssp.distance(6), 1);\n\tEXPECT_EQ(sssp.distance(7), 3);\n}\n\nTEST_F(SSSPGTest, testDirectedDijkstra) {\n/* Graph:\n ________\n\t\t/ \\.\n\t 0 3. 6\n\t\t\\. ./ \\ ./\n\t\t 2 .5\n\t\t./ \\. / \\.\n\t 1 4 7\n*/\n\tint n = 8;\n\t// G directed unweighted\n\tGraph G(n, false, true);\n\n\tG.addEdge(0, 6, 1);\n\tG.addEdge(0, 2, 1);\n\tG.addEdge(3, 2, 1);\n\tG.addEdge(5, 3, 1);\n\tG.addEdge(6, 5, 1);\n\tG.addEdge(5, 7, 1);\n\tG.addEdge(4, 5, 1);\n\tG.addEdge(2, 4, 1);\n\tG.addEdge(2, 1, 1);\n\n\n\tDijkstra sssp(G, 0);\n\tsssp.run();\n\tEXPECT_EQ(sssp.distance(0), 0);\n\tEXPECT_EQ(sssp.distance(1), 2);\n\tEXPECT_EQ(sssp.distance(2), 1);\n\tEXPECT_EQ(sssp.distance(3), 3);\n\tEXPECT_EQ(sssp.distance(4), 2);\n\tEXPECT_EQ(sssp.distance(5), 2);\n\tEXPECT_EQ(sssp.distance(6), 1);\n\tEXPECT_EQ(sssp.distance(7), 3);\n}\n}\n" }, { "alpha_fraction": 0.7027276158332825, "alphanum_fraction": 0.7082565426826477, "avg_line_length": 31.297618865966797, "blob_id": "489ce865a5ec6026dd7df744fb8a2f12c2bc4c00", "content_id": "3f3d53ef1c51415dcc22625366e92489c96cb4d4", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 5426, "license_type": "permissive", "max_line_length": 149, "num_lines": 168, "path": "/networkit/cpp/numerics/LAMG/Lamg.cpp", "repo_name": "networkitproject/networkit-mirror", "src_encoding": "UTF-8", "text": "/*\n * Lamg.cpp\n *\n * Created on: Oct 20, 2015\n * Author: Michael Wegner ([email protected])\n */\n\n#include \"Lamg.h\"\n\n#include \"../../components/ParallelConnectedComponents.h\"\n#include \"../GaussSeidelRelaxation.h\"\n#include \"omp.h\"\n\nnamespace NetworKit {\n\n\nLamg::Lamg(const double tolerance) : LinearSolver(tolerance), validSetup(false), lamgSetup(smoother), numComponents(0) {\n}\n\nvoid Lamg::initializeForOneComponent() {\n\tcompHierarchies = std::vector<LevelHierarchy>(1);\n\tlamgSetup.setup(laplacianMatrix, compHierarchies[0]);\n\tcompSolvers.clear();\n\tcompSolvers.push_back(SolverLamg(compHierarchies[0], smoother));\n\tvalidSetup = true;\n}\n\nvoid Lamg::setupConnected(const CSRMatrix &laplacianMatrix) {\n\tthis->laplacianMatrix = laplacianMatrix;\n\tinitializeForOneComponent();\n\tnumComponents = 1;\n}\n\nvoid Lamg::setup(const CSRMatrix &laplacianMatrix) {\n\tthis->laplacianMatrix = laplacianMatrix;\n\tGraph G = 
CSRMatrix::matrixToGraph(laplacianMatrix);\n\tParallelConnectedComponents con(G, false);\n\tcon.run();\n\tnumComponents = con.numberOfComponents();\n\tif (numComponents == 1) {\n\t\tinitializeForOneComponent();\n\t} else {\n\t\tgraph2Components = std::vector<index>(G.numberOfNodes());\n\n\t\tinitialVectors = std::vector<Vector>(numComponents);\n\t\trhsVectors = std::vector<Vector>(numComponents);\n\n\t\tcomponents = std::vector<std::vector<index>>(numComponents);\n\t\tcompHierarchies = std::vector<LevelHierarchy>(numComponents);\n\t\tcompSolvers.clear();\n\t\tcompStati = std::vector<LAMGSolverStatus>(numComponents);\n\n\t\t// create solver for every component\n\t\tindex compIdx = 0;\n\t\tfor (auto component : con.getPartition().getSubsets()) {\n\t\t\tcomponents[compIdx] = std::vector<index>(component.begin(), component.end());\n\n\t\t\tstd::vector<std::pair<index,index>> positions;\n\t\t\tstd::vector<double> values;\n\n\t\t\tindex idx = 0;\n\t\t\tfor (node u : components[compIdx]) {\n\t\t\t\tgraph2Components[u] = idx;\n\t\t\t\tidx++;\n\t\t\t}\n\n\t\t\tfor (node u : components[compIdx]) {\n\t\t\t\tG.forNeighborsOf(u, [&](node v, edgeweight w) {\n\t\t\t\t\tpositions.push_back(std::make_pair(graph2Components[u], graph2Components[v]));\n\t\t\t\t\tvalues.push_back(w);\n\t\t\t\t});\n\t\t\t}\n\t\t\tCSRMatrix compMatrix(component.size(), component.size(), positions, values);\n\t\t\tinitialVectors[compIdx] = Vector(component.size());\n\t\t\trhsVectors[compIdx] = Vector(component.size());\n\t\t\tlamgSetup.setup(compMatrix, compHierarchies[compIdx]);\n\t\t\tcompSolvers.push_back(SolverLamg(compHierarchies[compIdx], smoother));\n\t\t\tLAMGSolverStatus status;\n\t\t\tstatus.desiredResidualReduction = tolerance * component.size() / G.numberOfNodes();\n\t\t\tcompStati[compIdx] = status;\n\n\t\t\tcompIdx++;\n\t\t}\n\n\t\tvalidSetup = true;\n\t}\n}\n\nSolverStatus Lamg::solve(const Vector &rhs, Vector &result, count maxConvergenceTime, count maxIterations) {\n\tif (!validSetup || result.getDimension() != laplacianMatrix.numberOfColumns()\n\t\t\t|| rhs.getDimension() != laplacianMatrix.numberOfRows()) {\n\t\tthrow std::runtime_error(\"No or wrong matrix is setup for given vectors.\");\n\t}\n\n\tSolverStatus status;\n\n\tif (numComponents == 1) {\n\t\tLAMGSolverStatus stat;\n\t\tstat.desiredResidualReduction = tolerance * rhs.length() / (laplacianMatrix * result - rhs).length();\n\t\tstat.maxIters = maxIterations;\n\t\tstat.maxConvergenceTime = maxConvergenceTime;\n\t\tcompSolvers[0].solve(result, rhs, stat);\n\n\t\tstatus.residual = stat.residual;\n\t\tstatus.numIters = stat.numIters;\n\t\tstatus.converged = stat.converged;\n\t} else {\n\t\t// solve on every component\n\t\tcount maxIters = 0;\n\t\tfor (index i = 0; i < components.size(); ++i) {\n\t\t\tfor (auto element : components[i]) {\n\t\t\t\tinitialVectors[i][graph2Components[element]] = result[element];\n\t\t\t\trhsVectors[i][graph2Components[element]] = rhs[element];\n\t\t\t}\n\n\t\t\tdouble resReduction = tolerance * rhsVectors[i].length() / (compHierarchies[i].at(0).getLaplacian() * initialVectors[i] - rhsVectors[i]).length();\n\t\t\tcompStati[i].desiredResidualReduction = resReduction * components[i].size() / laplacianMatrix.numberOfRows();\n\t\t\tcompStati[i].maxIters = maxIterations;\n\t\t\tcompStati[i].maxConvergenceTime = maxConvergenceTime;\n\t\t\tcompSolvers[i].solve(initialVectors[i], rhsVectors[i], compStati[i]);\n\n\t\t\tfor (auto element : components[i]) { // write solution back to result\n\t\t\t\tresult[element] = 
initialVectors[i][graph2Components[element]];\n\t\t\t}\n\n\t\t\tmaxIters = std::max(maxIters, compStati[i].numIters);\n\t\t}\n\n\t\tstatus.residual = (rhs - laplacianMatrix * result).length();\n\t\tstatus.converged = status.residual <= tolerance;\n\t\tstatus.numIters = maxIters;\n\t}\n\n\treturn status;\n}\n\nvoid Lamg::parallelSolve(const std::vector<Vector> &rhs, std::vector<Vector> &results, count maxConvergenceTime, count maxIterations) {\n\tif (numComponents == 1) {\n\t\tassert(rhs.size() == results.size());\n\t\tconst index numThreads = omp_get_max_threads();\n\t\tif (compSolvers.size() != numThreads) {\n\t\t\tcompSolvers.clear();\n\n\t\t\tfor (index i = 0; i < (index) numThreads; ++i) {\n\t\t\t\tcompSolvers.push_back(SolverLamg(compHierarchies[0], smoother));\n\t\t\t}\n\t\t}\n\n\t\tbool nested = omp_get_nested();\n\t\tif (nested) omp_set_nested(false);\n\n#pragma omp parallel for\n\t\tfor (index i = 0; i < rhs.size(); ++i) {\n\t\t\tindex threadId = omp_get_thread_num();\n\t\t\tLAMGSolverStatus stat;\n\t\t\tstat.desiredResidualReduction = tolerance * rhs[i].length() / (laplacianMatrix * results[i] - rhs[i]).length();\n\t\t\tstat.maxIters = maxIterations;\n\t\t\tstat.maxConvergenceTime = maxConvergenceTime;\n\t\t\tcompSolvers[threadId].solve(results[i], rhs[i], stat);\n\t\t}\n\n\t\tif (nested) omp_set_nested(true);\n\t}\n}\n\n\n\n} /* namespace NetworKit */\n" }, { "alpha_fraction": 0.6746855974197388, "alphanum_fraction": 0.6746855974197388, "avg_line_length": 19.550561904907227, "blob_id": "7fdabfa134358bb3e9a74fded8d476078d9f6959", "content_id": "59328619c32f88446ba677ca1e9baea0148c03a7", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1829, "license_type": "permissive", "max_line_length": 92, "num_lines": 89, "path": "/networkit/cpp/auxiliary/Enforce.h", "repo_name": "networkitproject/networkit-mirror", "src_encoding": "UTF-8", "text": "#ifndef ENFORCE_H_\n#define ENFORCE_H_\n\n#include <cassert>\n#include <stdexcept>\n#include <string>\n\nnamespace Aux {\n\n/**\n * Enforces that b is true and throws an Exception otherwise.\n *\n * If provided, msg must not be null, otherwise the behavior of this\n * function is undefined.\n */\ntemplate<typename Exception = std::runtime_error>\ninline void enforce(bool b, const char* msg = \"\") {\n\tassert(msg && \"Message to enforce must not be nullptr\");\n\tif (!b) {\n\t\tthrow Exception{msg};\n\t}\n}\n\n/**\n * Overload that accepts a std::string. 
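A hypothetical call site (identifiers invented for illustration):\n * Aux::enforce(ok, \"could not open \" + path); // message built as a std::string\n * 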
This is mainly for convenience\n * while keeping the default-version free of unneeded allocations.\n */\ntemplate<typename Exception = std::runtime_error>\ninline void enforce(bool b, const std::string& msg) {\n\tenforce<Exception>(b, msg.c_str());\n}\n\n/**\n * Checks that the provided fstream is opened and throws an exception otherwise.\n */\ntemplate<typename Stream>\ninline void enforceOpened(const Stream& stream) {\n\tenforce(stream.is_open());\n}\n\n/**\n * This namespace provides some Types with a static member-function `void enforce(bool)`\n * that may check whether the argument is true and create some kind of failure otherwise.\n */\nnamespace Checkers {\n\t\n\t/**\n\t * Checks the bool via assert\n\t */\n\tstruct Asserter {\n\t\tstatic void enforce(bool b) {\n\t\t\tassert(b);\n\t\t\t(void) b; // prevent warnings in release-builds\n\t\t}\n\t};\n\t\n\t/**\n\t * Checks the bool via enforce\n\t */\n\tstruct Enforcer {\n\t\tstatic void enforce(bool b) {\n\t\t\t::Aux::enforce(b);\n\t\t}\n\t};\n\t\n\t/**\n\t * Calls std::terminate if the bool is false\n\t */\n\tstruct Terminator {\n\t\tstatic void enforce(bool b) {\n\t\t\tif (!b) {\n\t\t\t\tstd::terminate();\n\t\t\t}\n\t\t}\n\t};\n\n\t/**\n\t * Won't look at the bool (not even in debug-mode, which is how this differs from Asserter)\n\t */\n\tstruct Ignorer{\n\t\tstatic void enforce(bool) {}\n\t};\n}\n\n} // namespace Aux\n\n\n\n#endif // ENFORCE_H_\n" }, { "alpha_fraction": 0.6919999718666077, "alphanum_fraction": 0.7383333444595337, "avg_line_length": 33.872093200683594, "blob_id": "80f33fe4947112729a41c4ab01a162c9db864d65", "content_id": "3526b5933838c62f471fc5cdff3023dc386acfbc", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 3000, "license_type": "permissive", "max_line_length": 109, "num_lines": 86, "path": "/networkit/cpp/algebraic/test/NormalizedLaplacianMatrixGTest.cpp", "repo_name": "networkitproject/networkit-mirror", "src_encoding": "UTF-8", "text": "/*\n * NormalizedLaplacianMatrixGTest.cpp\n *\n * Created on: 25.03.2014\n * Author: Michael Wegner ([email protected])\n */\n\n#include \"NormalizedLaplacianMatrixGTest.h\"\n\n\nnamespace NetworKit {\n\nNormalizedLaplacianMatrixGTest::NormalizedLaplacianMatrixGTest() {\n}\n\nNormalizedLaplacianMatrixGTest::~NormalizedLaplacianMatrixGTest() {\n}\n\nTEST(NormalizedLaplacianMatrixGTest, testSmallNormalizedLaplacianMatrix) {\n\tNetworKit::Graph graph(7);\n\tgraph.addEdge(0, 1);\n\tgraph.addEdge(0, 4);\n\tgraph.addEdge(1, 4);\n\tgraph.addEdge(1, 2);\n\tgraph.addEdge(2, 3);\n\tgraph.addEdge(3, 4);\n\tgraph.addEdge(3, 5);\n\n\tNormalizedLaplacianMatrix normalizedLaplacianMatrix(graph);\n\tASSERT_EQ(graph.numberOfNodes(), normalizedLaplacianMatrix.numberOfRows());\n\tASSERT_EQ(graph.numberOfNodes(), normalizedLaplacianMatrix.numberOfColumns());\n\n\tEXPECT_EQ(1, normalizedLaplacianMatrix(0,0));\n\tEXPECT_EQ(-1.0 / sqrt(2.0 * 3.0), normalizedLaplacianMatrix(0,1));\n\tEXPECT_EQ(0, normalizedLaplacianMatrix(0,2));\n\tEXPECT_EQ(0, normalizedLaplacianMatrix(0,3));\n\tEXPECT_EQ(-1.0 / sqrt(2.0 * 3.0), normalizedLaplacianMatrix(0,4));\n\tEXPECT_EQ(0, normalizedLaplacianMatrix(0,5));\n\tEXPECT_EQ(0, normalizedLaplacianMatrix(0,6));\n\tEXPECT_EQ(1, normalizedLaplacianMatrix(1,1));\n\tEXPECT_EQ(-1.0 / sqrt(2.0 * 3.0), normalizedLaplacianMatrix(1,2));\n\tEXPECT_EQ(0, normalizedLaplacianMatrix(1,3));\n\tEXPECT_EQ(-1.0 / 3.0, normalizedLaplacianMatrix(1,4));\n\tEXPECT_EQ(0, normalizedLaplacianMatrix(1,5));\n\tEXPECT_EQ(0, 
normalizedLaplacianMatrix(1,6));\n\tEXPECT_EQ(1, normalizedLaplacianMatrix(2,2));\n\tEXPECT_EQ(-1.0 / sqrt(2.0 * 3.0), normalizedLaplacianMatrix(2,3));\n\tEXPECT_EQ(0, normalizedLaplacianMatrix(2,4));\n\tEXPECT_EQ(0, normalizedLaplacianMatrix(2,5));\n\tEXPECT_EQ(0, normalizedLaplacianMatrix(2,6));\n\tEXPECT_EQ(1, normalizedLaplacianMatrix(3,3));\n\tEXPECT_EQ(-1.0 / 3.0, normalizedLaplacianMatrix(3,4));\n\tEXPECT_EQ(-1.0 / sqrt(3.0), normalizedLaplacianMatrix(3,5));\n\tEXPECT_EQ(0, normalizedLaplacianMatrix(3,6));\n\tEXPECT_EQ(1, normalizedLaplacianMatrix(4,4));\n\tEXPECT_EQ(0, normalizedLaplacianMatrix(4,5));\n\tEXPECT_EQ(0, normalizedLaplacianMatrix(4,6));\n\tEXPECT_EQ(1, normalizedLaplacianMatrix(5,5));\n\tEXPECT_EQ(0, normalizedLaplacianMatrix(5,6));\n\tEXPECT_EQ(0, normalizedLaplacianMatrix(6,6));\n}\n\nTEST(NormalizedLaplacianMatrixGTest, testNormalizedLaplacianMatrixOfLesmisGraph) {\n\t// read lesmis graph\n\tNetworKit::METISGraphReader graphReader;\n\tNetworKit::Graph graph = graphReader.read(\"input/lesmis.graph\");\n\n\t// create NormalizedLaplacianMatrix\n\tNormalizedLaplacianMatrix mat(graph);\n\n\tmat.forNonZeroElementsInRowOrder([&](const index row, const index column, const double value){\n\t\tif (row == column) {\n\t\t\tif (graph.weightedDegree(row) != 0) {\n\t\t\t\tif (graph.isWeighted()) {\n\t\t\t\t\tEXPECT_EQ(1-graph.weight(row, row)/graph.weightedDegree(row), value);\n\t\t\t\t} else {\n\t\t\t\t\tEXPECT_EQ(1, value);\n\t\t\t\t}\n\t\t\t}\n\t\t} else {\n\t\t\tEXPECT_EQ(-graph.weight(row, column)/sqrt(graph.weightedDegree(row)*graph.weightedDegree(column)), value);\n\t\t}\n\t});\n}\n\n} /* namespace NetworKit */\n\n" }, { "alpha_fraction": 0.6068111658096313, "alphanum_fraction": 0.6315789222717285, "avg_line_length": 10.962963104248047, "blob_id": "e702802eeabe3b0cc79e81fcaee857dc2963811b", "content_id": "e2377fd364562a4e5ac916d2fbb488cd4110b15f", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 323, "license_type": "permissive", "max_line_length": 40, "num_lines": 27, "path": "/networkit/cpp/structures/test/CoverGTest.h", "repo_name": "networkitproject/networkit-mirror", "src_encoding": "UTF-8", "text": "/*\n * CoverGTest.h\n *\n * Created on: 12.12.2013\n * Author: Maximilian Vogel ([email protected])\n */\n\n#ifndef NOGTEST\n\n#ifndef COVERGTEST_H_\n#define COVERGTEST_H_\n\n#include <gtest/gtest.h>\n\nnamespace NetworKit {\n\nclass CoverGTest: public testing::Test {\n\n};\n\n\n\n\n} /* namespace NetworKit */\n#endif /* COVERGTEST_H_ */\n\n#endif /*NOGTEST */\n" }, { "alpha_fraction": 0.6489361524581909, "alphanum_fraction": 0.6617021560668945, "avg_line_length": 15.785714149475098, "blob_id": "94c54ba6a2ae2b2e38d4b0e79d2220f478d71952", "content_id": "2333b4ed34067dca1d95e4c8e2d186126e582acf", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 470, "license_type": "permissive", "max_line_length": 45, "num_lines": 28, "path": "/networkit/cpp/algebraic/test/CSRMatrixGTest.h", "repo_name": "networkitproject/networkit-mirror", "src_encoding": "UTF-8", "text": "/*\n * CSRMatrixGTest.h\n *\n * Created on: May 13, 2015\n * Author: Michael Wegner\n */\n\n#ifndef CSRMATRIXGTEST_H_\n#define CSRMATRIXGTEST_H_\n\n#include \"gtest/gtest.h\"\n\n#include \"../CSRMatrix.h\"\n#include \"../Vector.h\"\n#include \"../../graph/Graph.h\"\n#include \"../../io/METISGraphReader.h\"\n\nnamespace NetworKit {\n\nclass CSRMatrixGTest : public testing::Test 
{\npublic:\n\tCSRMatrixGTest();\n\tvirtual ~CSRMatrixGTest();\n};\n\n} /* namespace NetworKit */\n\n#endif /* CSRMATRIXGTEST_H_ */\n" }, { "alpha_fraction": 0.7535008192062378, "alphanum_fraction": 0.7546766400337219, "avg_line_length": 53.07514572143555, "blob_id": "fe00f22972657c45ba6c581393f798fc94ed7021", "content_id": "df746ec3911539cc6663e7a966a3f7c06c43235c", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 18714, "license_type": "permissive", "max_line_length": 828, "num_lines": 346, "path": "/Doc/DevGuide.mdown", "repo_name": "networkitproject/networkit-mirror", "src_encoding": "UTF-8", "text": "# NetworKit Development Guide\n\nThis text is meant to provide some guidelines for the ongoing development of the project. It is meant for core developers as well as occasional contributors.\n\nThe following text assumes some basic familiarity with the Mercurial version control software. It is not a Mercurial tutorial, because you will find a good one at [hginit.com](http://hginit.com). Rather, it explains concepts and workflows for the development of this project.\n\nIf you want to contribute, you should consider the [technical report](https://arxiv.org/pdf/1403.3005.pdf) on NetworKit to get familiar with the architecture.\n\nIf you use NetworKit in your research publications, please cite the mentioned technical report or the specific algorithm. A list of publications is available on the [website](TODO: add link).\n\n## How to contribute\n\n### Report bugs\n\nFor the time being, bugs should be reported by sending a report to the [mailing list][list]. Please provide a minimal example so that others can reproduce that bug.\n\n### Fork NetworKit\n\nFeel free to fork NetworKit on algohub and start contributing by fixing bugs or taking care of the issues at [kanboard.iti.kit.edu](https://kanboard.iti.kit.edu). New and missing features are welcome as well.\n\n\n\n## Repositories\n\nThe NetworKit main development repository is at [http://algohub.iti.kit.edu/parco/NetworKit/NetworKit](http://algohub.iti.kit.edu/parco/NetworKit/NetworKit). Access to this repository is provided on request.\n\n[algohub.iti.kit.edu](http://algohub.iti.kit.edu) (an installation of [RhodeCode](https://rhodecode.com/)) makes it easy to create and manage forks. Forking is distinct from branching and creates a new repository with a new address, its own access control etc. A fork contains all branches of its parent.\n\n### Project Tracker (Kanboard)\n\nAt [kanboard.iti.kit.edu](https://kanboard.iti.kit.edu) we maintain a project task tracker to coordinate development and releases. An account is given on request; please ask on the mailing list. Tasks are moved from left to right through the columns:\n\n- `Backlog`: improvement ideas, some day maybe, \"nice to have\"\n- `ToDo`: scheduled improvements\n- `Work in progress`\n- `To Review`: requesting peer review\n- `Ready for Release`\n\nThere is the possibility to create \"swim lanes\" for different releases.\n\n## Branches\n\nCurrently, the two most important branches of NetworKit are `Dev` and `default`.\n\n\t\t ________ \tDev\n\t____/________\tdefault\n\nAs the name says, `default` is the branch which you are on if you do not switch. It is therefore the release branch, containing code which is ready for use. 
Unless you are a core developer preparing a release or fixing an urgent bug, you do not make changes to `default`.\n\n`Dev` is the development branch and most of the development of new features happens in this branch. This is also where new releases are being prepared. When pushing into this branch, think about whether your code is ready for the core development team to work with and will be suitable for a release in the foreseeable future.\n\nIt can be appropriate to create additional branches for projects, features, developer teams etc. Creation of branches should be coordinated with the core development team. For this purpose, post to the [mailing list][list].\n\n## Tags\n\nA tag is nothing more than a “symbolic name” for a revision. In NetworKit tags are used to mark release versions in the `default` branch, with a `MAJOR.MINOR` version name scheme.\n\n\n## Workflows\n\nThis section describes how to work with branches and forks in different scenarios.\n\n### Using NetworKit\n\nIf you want to build and use NetworKit and do not plan to contribute changes, simply clone the repository. By default, you will be on the `default` branch, which represents the current release. Follow the setup instructions in the `Readme`.\n\n### Core Development\n\nThis section describes workflows for the core development team.\n\n\n#### Bugfixes\n\nBugfixes are changes that should be immediately visible to users of NetworKit, such as solutions for urgent errors or improvements of the `Readme` document. In this case, make the changes in the `default` branch and commit. Then switch to the `Dev` branch and merge the `default` branch back into `Dev`.\n\n\n\t\t _______________ \tDev\n\t\t /\t\t\t / < \tmerge default into Dev\n\t____/____________/____\tdefault\n\t\t\t\t ^ bugfix\n\n\nExample:\n\n\thg up default\n\t...\n\thg com -m \"fixed bug xyz\"\n\thg up Dev\n\thg merge default\n\thg com -m \"backmerge bugfix xyz\"\n\n\n#### Releasing New Features\n\nWhen new features should be released, the `Dev` branch is merged into the `default` branch. Additional testing and cleanup is performed before that happens. The new major or minor release is then tagged with a version number.\n\n\t\t ______________________________________________________ \tDev\n\t\t / ^ new feature prepare release ^ \\ < merge Dev into default\n\t____/________________________________________\\______________\tdefault\n\t\t\t\t \t\t\t\t\t\t\t\t\t ^ tag version\n\n\nExample:\n\n\thg up Dev\n\thg com -m \"ready for release X.Y\"\n\thg up default\n\thg merge Dev\n\thg com -m \"release X.Y\"\n\n\n#### Multiple heads in multiple branches\n\nIf remote changes have happened in multiple branches and you pull them, these branches will have multiple heads. Merging now needs to happen for each of the affected branches before you can push. Switch to each branch and perform a merge as usual. As an alternative to merging, you may try the `rebase` [extension][rebase].\n\n[rebase]: https://www.mercurial-scm.org/wiki/RebaseExtension\n\n\n\n### Contributions\n\nUsers of NetworKit are welcome to contribute their modifications. New features must be added to the `Dev` branch, not the `default` branch. We recommend the following workflow:\n\n1. create a fork of the main repository\n2. switch to the `Dev` branch\n3. make and commit your changes while being on the `Dev` branch\n4. 
send a pull request to the main repository\n\n\n\n### Student Projects\n\nStudents with long-term projects like Bachelor's or Master's theses should familiarize themselves with the guidelines and select a forking/branching model with their advisor.\n\n\n\n\n## Branching Cheat Sheet\n\n- list all available branches: `hg branches`\n- check on which branch you are: `hg branch`\n- see heads (most recent commits) of all branches: `hg head`\n- see tip (most recent commits) of the branch you are currently working on: `hg tip`\n- switch to a specific branch: `hg update <branchname>`\n- start a new branch:\t`hg branch <branchname>`\n- merge `branchY` into `branchX`: `hg update branchX`, then `hg merge branchY`\n\n\n## Conventions\n\nThe following general conventions apply to all NetworKit developers.\n\n### Versioning\n\n- Before you commit, make sure your code compiles and run the unit tests. Never push code which breaks the build for others.\n- Commit regularly and often to your local repository.\n- Use meaningful commit messages.\n- Get the newest changes from the repository regularly and merge them into your local repository.\n- Make sure that you merged correctly and did not break other people's work.\n- Push correct code early if possible. Merging is easier if all developers are up to date.\n- Never `push --force` to the main repository.\n\n\n\n## Unit Tests and Testing\n\nEvery new feature must be covered by a unit test. Omitting unit tests makes it very likely that your feature will break silently as the project develops, leading to unnecessary work in tracing back the source of the error.\n\nUnit tests for the C++ part of NetworKit are based on the `googletest` library. For more information read the [googletest primer](http://code.google.com/p/googletest/wiki/Primer). The Python test framework currently relies on `nose` to collect the tests.\n\n- Each source folder contains a `test` folder with `googletest` classes. Create the unit tests for each feature in the appropriate `test/*GTest` class by adding a `TEST_F` function.\n- Prefix standard unit tests with `test` and experimental feature tests with `try`. A `test*` must pass when pushed to the main repository, a `try*` is allowed to fail.\n- Keep the running time of test functions to the minimum needed for testing functionality. Testing should be fast; long-running unit tests look like infinite loops.\n- If the unit test requires a data set, add the file to the `input/` folder. Only small data sets (a few kilobytes maximum) are acceptable in the repository.\n- Any output files produced by unit tests must be written to the `output/` folder.\n\nTo build and run the tests you need the [gtest library](https://code.google.com/p/googletest/). Assuming gtest is successfully installed and you add the paths to your build.conf, the unit tests should be compiled with:\n\n\tscons --optimize=Dbg --target=Tests\n\nTo verify that the code was built correctly: Run all unit tests with\n\n\t\t./NetworKit-Tests-Dbg --tests/-t\n\nPerformance tests will be selected with\n\n\t\t./NetworKit-Tests-Dbg --benchmarks/-b\n\nwhile experimental tests are called with\n\n\t\t./NetworKit-Tests-Dbg --trials/-e\n\nTo run only specific unit tests, you can also add a filter expression, e. 
g.:\n\n\t\t./NetworKit-Tests-Dbg --gtest_filter=*PartitionGTest*/-f*PartitionGTest*\n\ninitiates unit tests only for the Partition data structure.\n\nFor the __Python__ unit tests, run:\n\n\t\tpython3 setup.py test [--cpp-tests/-c]\n\nThis command will compile the _NetworKit extension and then run all test cases on the Python layer. If you append `--cpp-tests/-c`, the unit tests of the C++ side will be compiled and run before the Python test cases.\n\n\n### Test-driven development\n\nIf you implement a new feature for NetworKit, we encourage you to adapt your development process to test-driven development. This means that you start with one or ideally several test cases for your feature and then write the feature for the test case(s). If your feature is mostly implemented in C++, you should write your test cases there. If you expose your feature to Python, you should also write a test case for the extension module on the Python layer. The same applies for features in Python.\n\n\n### Code Style\n\n- Compiler warnings are likely to turn into future errors. Try to fix them as soon as they appear.\n- Read some code to get used to the code style and try to adopt it.\n- Document classes, methods and attributes in Doxygen style.\n- Use the `count` and `index` integer types for non-negative integer quantities and indices.\n- In most cases, objects are passed by reference. New objects are stack-allocated and returned by value. Avoid pointers and `new` where possible.\n- Use the `override` keyword to indicate that a method overrides a virtual method in the superclass.\n- In Python, indent using tabs, not spaces.\n\n\n### Algorithm interface and class hierarchy\n\nWe use the possibilities provided through inheritance to generalize the common behaviour of algorithm implementations:\n\n- Data and parameters should be passed in the constructor.\n- A void run()-method that takes no parameters triggers the execution.\n- To retrieve the result(s), getter-functions() may be defined.\n\nThe `Algorithm` base class also defines a few other functions to query whether the algorithm can be run in parallel or to retrieve a string representation.\n\nThere may be more levels in the class hierarchy between an algorithm implementation and the base class, e.g. a single-source shortest-path class `SSSP` that generalizes the behaviour of BFS and Dijkstra implementations or the `Centrality` base class. When implementing new features or algorithms, make sure to adapt to the existing class hierarchies. At the very least, inherit from the `Algorithm` base class. Changes to existing interfaces or suggestions for new interfaces should be discussed through the [mailing list]([email protected]).\n\n\n## Exposing C++ Code to Python\n\nAssuming the unit tests for the new feature you implemented are correct and successful, you need to make your features available to Python in order to use them. NetworKit uses Cython to bridge C++ and Python. All of this bridge code is contained in the Cython code file `src/python/_NetworKit.pyx`. The content is automatically translated into C++ and then compiled to a Python extension module.\n\nCython syntax is a superset of Python that knows about static type declarations and other things from the C/C++ world. The best way to get used to it is to work on examples. Take the most common case of exposing a C++ class as a Python class. 
Consider the following example that exposes the class `NetworKit::Dijkstra`:\n\n\t\tcdef extern from \"cpp/graph/Dijkstra.h\":\n\t\t\tcdef cppclass _Dijkstra \"NetworKit::Dijkstra\"(_SSSP):\n\t\t\t\t_Dijkstra(_Graph G, node source, bool storePaths, bool storeStack, node target) except +\n\nThe code above exposes the C++ class definition to Cython - but not yet to Python. First of all, Cython needs to know which C++ declarations to use so the first line directs Cython to place an `#include` statement. The second line defines a class that is only accessible in the Cython world. Our convention is that the name of the new class is the name of the referenced C++ class with a prepended underscore to avoid namespace conflicts. What follows is the \"real\" C++ name of the class. After that, the declarations of the methods you want to make available for Python are needed. The `except +` statement is necessary for exceptions thrown by the C++ code to be rethrown as Python exceptions rather than causing a crash. Also, take care that the Cython declarations match the declarations from the referenced header file.\n\n\t\tcdef extern from \"cpp/graph/SSSP.h\":\n\t\t\tcdef cppclass _SSSP \"NetworKit::SSSP\"(_Algorithm):\n\t\t\t\t_SSSP(_Graph G, node source, bool storePaths, bool storeStack, node target) except +\n\t\t\t\tvector[edgeweight] getDistances(bool moveOut) except +\n\t\t\t\t[...]\n\n\t\tcdef class SSSP(Algorithm):\n\t\t\t\"\"\" Base class for single source shortest path algorithms. \"\"\"\n\n\t\t\tcdef Graph _G\n\n\t\t\tdef __init__(self, *args, **namedargs):\n\t\t\t\tif type(self) == SSSP:\n\t\t\t\t\traise RuntimeError(\"Error, you may not use SSSP directly, use a sub-class instead\")\n\n\t\t\tdef __dealloc__(self):\n\t\t\t\tself._G = None # just to be sure the graph is deleted\n\n\t\t\tdef getDistances(self, moveOut=True):\n\t\t\t\t\"\"\"\n\t\t\t\tReturns a vector of weighted distances from the source node, i.e. the\n\t\t \t \tlength of the shortest path from the source node to any other node.\n\n\t\t \t \tReturns\n\t\t \t \t-------\n\t\t \t \tvector\n\t\t \t \t\tThe weighted distances from the source node to any other node in the graph.\n\t\t\t\t\"\"\"\n\t\t\t\treturn (<_SSSP*>(self._this)).getDistances(moveOut)\n\t\t\t[...]\n\nWe mirror the class hierarchy of the C++ world also in Cython and Python. This also saves some boilerplate wrapping code as the functions shared by Dijkstra and BFS only need to be wrapped through SSSP.\n\n\t\tcdef class Dijkstra(SSSP):\n\t\t\t\"\"\" Dijkstra's SSSP algorithm.\n\n\t\t\tReturns list of weighted distances from node source, i.e. the length of the shortest path from source to\n\t\t\tany other node.\n\n\t\t Dijkstra(G, source, [storePaths], [storeStack], target)\n\n\t\t Creates Dijkstra for `G` and source node `source`.\n\n\t\t Parameters\n\t\t\t----------\n\t\t\tG : Graph\n\t\t\t\tThe graph.\n\t\t\tsource : node\n\t\t\t\tThe source node.\n\t\t\tstorePaths : bool\n\t\t\t\tstore paths and number of paths?\n\t\t\tstoreStack : bool\n\t\t\t\tmaintain a stack of nodes in order of decreasing distance?\n\t\t\ttarget : node\n\t\t\t\ttarget node. Search ends when target node is reached. 
`target` is set to None by default.\n\t\t \"\"\"\n\t\t\tdef __cinit__(self, Graph G, source, storePaths=True, storeStack=False, node target=none):\n\t\t\t\tself._G = G\n\t\t\t\tself._this = new _Dijkstra(G._this, source, storePaths, storeStack, target)\n\nFor the class to be accessible from the Python world, you need to define a Python wrapper class which delegates method calls to the native class. The Python class variable `_this` holds a pointer to an instance of the native class. Please note that the parameters are now Python objects. Method wrappers take these Python objects as parameters and pass the internal native objects to the actual C++ method call. The constructor of such a wrapper class is called `__cinit__`, and it creates an instance of the native object.\n\nThe docstring between the triple quotation marks can be accessed through Python's `help(...)` function and is the main documentation of NetworKit. Always provide at least a short and precise docstring so the user can get an idea of the functionality of the class. For C++ types available to Python and further examples, look through the `_NetworKit.pyx` file. The whole process certainly has some intricacies, e.g. some tricks are needed to avoid memory waste when passing around large objects such as graphs. When in doubt, look at examples of similar classes already exposed. Listen to the Cython compiler - coming from C++, its error messages are in general pleasantly human-readable.\n\n## Make algorithms interruptible with CTRL+C/SIGINT\n\nWhen an algorithm takes too long to produce a result, it can be interrupted with a SIGINT signal triggered by CTRL+C. When triggering from the Python shell while the runtime is in the C++ domain, execution is aborted and even terminates the Python shell. Therefore, we implemented a signal handler infrastructure in C++ that raises a special exception instead of aborting. When implementing an algorithm, it is strongly encouraged to integrate the signal handler into the implementation. There are many examples of how to use it, e.g. `networkit/cpp/centrality/Betweenness.cpp` or `networkit/cpp/community/PartitionFragmentation.cpp`.\n\n\n\n\n## Contact\n\nTo discuss important changes to NetworKit, use the [e-mail list][list] (`[email protected]`).\n\n\n[list]: https://lists.ira.uni-karlsruhe.de/mailman/listinfo/networkit\n\n\n## Building the documentation\n\nThe class documentation and the website can be automatically generated with Sphinx. You will need the following\nsoftware to generate the documentation and website:\n\n- [Sphinx](http://www.sphinx-doc.org) (e.g. via `pip3 install sphinx`)\n- [Pandoc](http://pandoc.org)\n- [Doxygen](http://www.stack.nl/~dimitri/doxygen/)\n\nAfter you have installed the above-mentioned software, you can build the class documentation by calling `./make_doc.sh`\nin the folder `Doc/doc`. This will generate the class documentation for C++ and Python in `Doc/Documentation`.\nSimilarly, you can call `./make_www.sh` to build the website. 
After the build finished, you find the generated website\nin `Doc/Website/`.\n\n\n## Further Reading\n\n- [hginit.com](http://hginit.com)\n- [Working with named branches](http://humblecoder.co.uk/blog/2010/02/24/working-with-named-branches-in-mercurial/)\n- [Managing releases and branchy development](http://hgbook.red-bean.com/read/managing-releases-and-branchy-development.html)\n- [Cython Documentation](http://docs.cython.org/index.html)\n" }, { "alpha_fraction": 0.6960651278495789, "alphanum_fraction": 0.7069199681282043, "avg_line_length": 19.47222137451172, "blob_id": "7de13f392fbd6b0836d93fa9dc1608459082d933", "content_id": "e6db3ec9e597b80d3afc05b4268e2b99b8f5d9e3", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 737, "license_type": "permissive", "max_line_length": 91, "num_lines": 36, "path": "/networkit/cpp/viz/MaxentStress.h", "repo_name": "networkitproject/networkit-mirror", "src_encoding": "UTF-8", "text": "/*\n * MaxentStress.h\n *\n * Created on: 22.01.2014\n * Author: Henning\n */\n\n#ifndef MAXENTSTRESS_H_\n#define MAXENTSTRESS_H_\n\n#include \"Layouter.h\"\n#include \"../linkprediction/AlgebraicDistanceIndex.h\"\n\nnamespace NetworKit {\n\n/**\n * @ingroup viz\n */\n// TODO: refactor to inherit from LayoutAlgorithm base class\nclass MaxentStress: public NetworKit::Layouter {\nprotected:\n\n\npublic:\n\t/**\n\t * DO NOT USE to create objects.\n\t * Default constructor. Only necessary for Python shell.\n\t */\n\tMaxentStress() {} // nullary constructor needed for Python shell\n\tMaxentStress(Point<float> bottomLeft, Point<float> topRight, bool useGivenLayout = false);\n\n\tvirtual void draw(Graph& g);\n};\n\n} /* namespace NetworKit */\n#endif /* MAXENTSTRESS_H_ */\n" }, { "alpha_fraction": 0.6537947654724121, "alphanum_fraction": 0.6696953773498535, "avg_line_length": 32.71171188354492, "blob_id": "efd2b78ad1c1f3372cefce3697d5378ab001ecb0", "content_id": "f6e249b06fd817ee0b3a2d2e404ead576b4cb630", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 7487, "license_type": "permissive", "max_line_length": 166, "num_lines": 222, "path": "/networkit/cpp/geometric/HyperbolicSpace.cpp", "repo_name": "networkitproject/networkit-mirror", "src_encoding": "UTF-8", "text": "/*\n * HyperbolicSpace.cpp\n *\n * Created on: 20.05.2014\n * Author: Moritz v. 
Looz ([email protected])\n */\n\n#include <assert.h>\n#include <cmath>\n\n\n#include \"HyperbolicSpace.h\"\n#include \"../auxiliary/Log.h\"\n\nusing std::abs;\nusing std::max;\n\nnamespace NetworKit {\n\ndouble HyperbolicSpace::nativeDistance(double firstangle, double firstR, double secondangle, double secondR) {\n\tassert(firstR >= 0);\n\tassert(secondR >= 0);\n\tassert(firstangle >= 0);\n\tassert(firstangle < 2*M_PI);\n\tassert(secondangle >= 0);\n\tassert(secondangle < 2*M_PI);\n\tdouble result;\n\tif (firstangle == secondangle) {\n\t\tresult = abs(firstR - secondR);\n\t}\n\telse {\n\t\tdouble deltaPhi = M_PI - abs(M_PI-abs(firstangle - secondangle));\n\t\tdouble coshDist = cosh(firstR)*cosh(secondR)-sinh(firstR)*sinh(secondR)*cos(deltaPhi);\n\t\tif (coshDist >= 1) result = acosh(coshDist);\n\t\telse result = 0;\n\t}\n\tassert(result >= 0);\n\treturn result;\n}\n\n/**\n * This distance measure is taken from the Poincaré disc model.\n */\ndouble HyperbolicSpace::poincareMetric(double phi_a, double r_a, double phi_b, double r_b) {\n\tassert(r_a < 1);\n\tassert(r_b < 1);\n\treturn poincareMetric(polarToCartesian(phi_a, r_a), polarToCartesian(phi_b, r_b));\n}\n\ndouble HyperbolicSpace::poincareMetric(Point2D<double> a, Point2D<double> b) {\n\tassert(a.length() < 1);\n\tassert(b.length() < 1);\n\tdouble result = acosh( 1 + 2*a.squaredDistance(b) / ((1 - a.squaredLength())*(1 - b.squaredLength())));\n\tassert(result >= 0);\n\treturn result;\n}\n\n//double HyperbolicSpace::nativeHyperbolicDistance(double phi_a, double r_a, double phi_b, double r_b) {\n//\t/* Returns the hyperbolic distance between points u and v\n//\t* 2010 paper, eqn: 5\n//\t*/\n//\tdouble deltaPhi = M_PI - abs(M_PI-abs(phi_a - phi_b));\n//\tdouble distance = acosh(cosh(r_a)*cosh(r_b) - sinh(r_a)*sinh(r_b)*cos(deltaPhi));\n//\treturn distance;\n//}\n\nvoid HyperbolicSpace::fillPoints(vector<double> &angles, vector<double> &radii, double R, double alpha) {\n\tfillPoints(angles, radii, 0, 2*M_PI, 0, R, alpha);\n}\n\nvoid HyperbolicSpace::fillPoints(vector<double> &angles, vector<double> &radii, double minPhi, double maxPhi, double minR, double maxR, double alpha) {\n\tuint64_t n = radii.size();\n\tassert(angles.size() == n);\n\n\tdouble mincdf = cosh(alpha*minR);\n\tdouble maxcdf = cosh(alpha*maxR);\n\tstd::uniform_real_distribution<double> phidist{minPhi, maxPhi};\n\tstd::uniform_real_distribution<double> rdist{mincdf, maxcdf};\n\n\tfor (uint64_t i = 0; i < n; i++) {\n\t\tangles[i] = phidist(Aux::Random::getURNG());\n\t\t/**\n\t\t * for the radial coordinate distribution, I took the probability density from Greedy Forwarding in Dynamic Scale-Free Networks Embedded in Hyperbolic Metric Spaces\n\t\t * f (r) = sinh r/(cosh R − 1)\n\t\t * \\int sinh = cosh+const\n\t\t */\n\t\tdouble random = rdist(Aux::Random::getURNG());\n\t\tradii[i] = (acosh(random)/alpha);\n\t\tassert(radii[i] <= maxR);\n\t\tassert(radii[i] >= minR);\n\t\tassert(angles[i] <= maxPhi);\n\t\tassert(angles[i] >= minPhi);\n\t\tif (radii[i] == maxR) radii[i] = std::nextafter(radii[i], 0);\n\t\tassert(radii[i] < maxR);\n\t}\n}\n\nPoint2D<double> HyperbolicSpace::polarToCartesian(double phi, double r) {\n\treturn Point2D<double>(r*cos(phi), r*sin(phi));\n}\n\nstd::map<index, Point<float> > HyperbolicSpace::polarToCartesian(const vector<double> &angles, const vector<double> &radii) {\n\tassert(radii.size() == angles.size());\n\tstd::map<index, Point<float> > result;\n\tfor (index i = 0; i < angles.size(); i++) {\n\t\tPoint2D<double> coord = 
HyperbolicSpace::polarToCartesian(angles[i], radii[i]);\n\t\tPoint<float> temp(coord[0], coord[1]);\n\t\tresult.insert(std::make_pair(i, temp));\n\t}\n\treturn result;\n}\n\nvoid HyperbolicSpace::cartesianToPolar(Point2D<double> a, double &phi, double &r) {\n\tr = a.length();\n\tif (r == 0) phi = 0;\n\telse if (a[1] >= 0){\n\t\tphi = acos(a[0]/ r);\n\t} else {\n\t\tphi = -acos(a[0] / r);\n\t}\n\tif (phi < 0) phi += 2*M_PI;\n}\n\nvoid HyperbolicSpace::getEuclideanCircle(Point2D<double> hyperbolicCenter, double hyperbolicRadius, Point2D<double> &euclideanCenter, double &euclideanRadius) {\n\tdouble phi_h, r_h;\n\tHyperbolicSpace::cartesianToPolar(hyperbolicCenter, phi_h, r_h);\n\tdouble r_c;\n\tHyperbolicSpace::getEuclideanCircle(r_h, hyperbolicRadius, r_c, euclideanRadius);\n\teuclideanCenter = HyperbolicSpace::polarToCartesian(phi_h, r_c);\n}\n\nvoid HyperbolicSpace::getEuclideanCircle(double r_h, double hyperbolicRadius, double &radialCoordOfEuclideanCenter, double &euclideanRadius) {\n\tdouble a = cosh(hyperbolicRadius)-1;\n\tdouble b = 1-(r_h*r_h);\n\tradialCoordOfEuclideanCenter = (2*r_h)/(b*a+2);\n\teuclideanRadius = sqrt(radialCoordOfEuclideanCenter*radialCoordOfEuclideanCenter - (2*r_h*r_h - b*a)/(b*a+2));\n}\n\ndouble HyperbolicSpace::hyperbolicRadiusToEuclidean(double hyperbolicRadius) {\n\tdouble ch = cosh(hyperbolicRadius);\n\treturn sqrt((ch-1)/(ch+1));\n}\n\ndouble HyperbolicSpace::EuclideanRadiusToHyperbolic(double euclideanRadius) {\n\tdouble eusq = euclideanRadius*euclideanRadius;\n\tdouble result = acosh( 1 + 2*eusq / ((1 - eusq)));\n\treturn result;\n}\n\ndouble HyperbolicSpace::maxRinSlice(double minPhi, double maxPhi, double phi_c, double r_c, double euRadius) {\n\tdouble maxCos = max(cos(abs(minPhi - phi_c)), cos(abs(maxPhi - phi_c)));\n\t//double mirrorAngle;\n\t//if (phi_c >= M_PI) mirrorAngle = phi_c - M_PI;\n\t//else mirrorAngle = phi_c + M_PI;\n\n\tif (minPhi < phi_c && phi_c < maxPhi) maxCos = 1;\n\t//applying law of cosines here\n\tdouble p = r_c*maxCos;\n\tdouble maxR = p + sqrt(p*p - r_c*r_c + euRadius*euRadius);\n\treturn maxR;\n}\n\ndouble HyperbolicSpace::hyperbolicSpaceInEuclideanCircle(double r_c, double d_c,\n\t\tdouble r_max) {\n\tdouble result = 0;\n\tassert(r_c >= 0);\n\tassert(d_c >= 0);\n\tassert(r_c <= r_max);\n\tdouble min = r_c - d_c;\n\tdouble max = std::min(r_c+d_c, r_max);\n\n\tif (d_c > r_c) {\n\t\t//the query circle overlaps the origin\n\n\t\tif (d_c - r_c < r_max) {\n\t\t\t//remaining query circle is smaller than the disk representation\n\t\t\tresult += 2*M_PI*(cosh(EuclideanRadiusToHyperbolic(d_c-r_c))-1);//adding small circle around origin\n\t\t} else {\n\t\t\tresult += 2*M_PI*(cosh(EuclideanRadiusToHyperbolic(r_max))-1);//adding small circle around origin\n\t\t}\n\t\tassert(result <= 2*M_PI*(cosh(EuclideanRadiusToHyperbolic(r_max))-1));\n\t\tmin = std::nextafter(d_c-r_c, std::numeric_limits<double>::max());//correcting integral start to exclude circle\n\t}\n\n\t/**\n\t * Now, the integral.\n\t * It is 4\\int_{min}^{max} \\text{acos}(\\frac{r_c^2-d_c^2+r^2}{2r_c\\cdot r}) \\cdot \\frac{1}{1-r^2} \\cdot (\\sinh (\\text{acosh}( 1 + 2\\frac{r^2}{1 - r^2})))\\,dr\n\t * This solution was computed by WolframAlpha\n\t */\n\n\tif (max < min) return result;\n\n\tauto realpart = [](double r, double d, double c) {\n\t\tdouble result = acos((c*c-d*d+r*r) / (2*c*r)) / (r*r-1);\n\t\treturn result;\n\t};\n\n\tauto firstlogpart = [](double r, double d, double c) {\n\t\tdouble s = (c*c-d*d);\n\t\t//double denominator = r*r*s*s;\n\t\tdouble rsqs = 
r*r+s;\n\t\tdouble real = -2*s*sqrt(4*c*c*r*r-rsqs*rsqs);\n\t\tdouble imag = -4*c*c*r*r+2*s*r*r+2*s*s;\n\t\treturn atan2(imag, real)/2;\n\t};\n\n\tauto secondlogpart = [](double r, double d, double c) {\n\t\tdouble s = (c*c-d*d);\n\t\tdouble rsqs = r*r+s;\n\t\t//double denominator = (r*r-1)*(s-1);\n\t\tdouble real = sqrt(4*c*c*r*r-rsqs*rsqs);\n\t\tdouble imag = 2*c*c*(r*r+1)-(s+1)*rsqs;\n\t\timag = imag / sqrt((s+1)*(s+1)-(4*c*c));\n\t\treturn (s-1)*atan2(imag, real)/(2*sqrt((s+1)*(s+1)-(4*c*c)));\n\t};\n\n\tdouble lower = -realpart(min, d_c, r_c) -firstlogpart(min, d_c, r_c) + secondlogpart(min, d_c, r_c);\n\tdouble upper = -realpart(max, d_c, r_c) -firstlogpart(max, d_c, r_c) + secondlogpart(max, d_c, r_c);\n\tresult += 4*(upper - lower);\n\treturn result;\n}\n}\n" }, { "alpha_fraction": 0.6713194847106934, "alphanum_fraction": 0.6764550805091858, "avg_line_length": 33.825687408447266, "blob_id": "ee4732c4e9b2bc598b6ea8d8405dbe59ec8f62eb", "content_id": "da27632ae391af905af994d599b467e7acef4b4f", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7594, "license_type": "permissive", "max_line_length": 128, "num_lines": 218, "path": "/networkit/GraphMLIO.py", "repo_name": "networkitproject/networkit-mirror", "src_encoding": "UTF-8", "text": "import xml.etree.cElementTree as ET\nimport xml.sax\n\nfrom _NetworKit import Graph\n\n# GraphML Reader\nclass GraphMLSAX(xml.sax.ContentHandler):\n\t\"\"\" Parser for GraphML XML files, based on Python's XML.SAX implementation. 
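It builds a NetworKit Graph from the parsed nodes, edges and edge weights. 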
\"\"\"\n\n\tdef __init__(self):\n\t\t\"\"\" Initializes several important variables \"\"\"\n\t\txml.sax.ContentHandler.__init__(self)\n\t\tself.charBuffer = []\n\t\tself.mapping = dict()\n\t\tself.g = Graph(0)\n\t\tself.graphName = 'unnamed'\n\t\tself.weightedID = ''\n\t\tself.weighted = False\n\t\tself.directed = False\n\t\tself.edgestack = []\n\t\tself.edgeweight = 0.0\n\t\tself.keepData = False\n\n\tdef startElement(self, name, attrs):\n\t\t\"\"\" Parses all currently relevant XML tags and retrieves data.\"\"\"\n\t\tif name == \"graph\":\n\t\t\t# determine, if graph is directed:\n\t\t\tif attrs.getValue(\"edgedefault\") == \"directed\":\n\t\t\t\tprint(\"identified graph as directed\")\n\t\t\t\tself.directed = True\n\t\t\tif \"id\" in attrs.getNames() and not attrs.getValue(\"id\") == '':\n\t\t\t\t\tself.graphName = attrs.getValue(\"id\")\n\t\t\tself.g = Graph(0,self.weighted, self.directed)\n\t\t\tself.g.setName(self.graphName)\n\t\tif name == \"node\":\n\t\t\tu = self.g.addNode()\n\t\t\tval = attrs.getValue(\"id\")\n\t\t\tself.mapping[val] = u\n\t\telif name == \"edge\":\n\t\t\tu = attrs.getValue(\"source\")\n\t\t\tv = attrs.getValue(\"target\")\n\t\t\tself.edgestack.append((u,v))\n\t\telif name == \"key\":\n\t\t\t#print(\"found element with tag KEY\")\n\t\t\tif (attrs.getValue(\"for\") == 'edge' and attrs.getValue(\"attr.name\") == 'weight' and attrs.getValue(\"attr.type\") == 'double'):\n\t\t\t\tself.weighted = True\n\t\t\t\tself.weightedID = attrs.getValue(\"id\")\n\t\t\t\tprint(\"identified graph as weighted\")\n\t\telif name == \"data\" and attrs.getValue(\"key\") == self.weightedID:\n\t\t\tself.keepData = True\n\n\tdef endElement(self, name):\n\t\t\"\"\" Finalizes parsing of the started Element and processes retrieved data.\"\"\"\n\t\tdata = self.getCharacterData()\n\t\tif name == \"edge\":\n\t\t\tu = self.edgestack[len(self.edgestack)-1][0]\n\t\t\tv = self.edgestack[len(self.edgestack)-1][1]\n\t\t\tself.edgestack.pop()\n\t\t\tif self.weighted:\n\t\t\t\t#print (\"identified edge as weighted with weight: {0}\".format(edgeweight))\n\t\t\t\tself.g.addEdge(self.mapping[u], self.mapping[v], self.edgeweight)\n\t\t\t\tself.edgeweight = 0.0\n\t\t\telse:\n\t\t\t\tself.g.addEdge(self.mapping[u], self.mapping[v])\n\t\telif name == \"data\" and self.keepData:\n\t\t\tself.keepData = False\n\t\t\tself.edgeweight = float(data)\n\n\tdef characters(self, content):\n\t\tself.charBuffer.append(content)\n\n\tdef getCharacterData(self):\n\t\tdata = ''.join(self.charBuffer).strip()\n\t\tself.charBuffer = []\n\t\treturn data\n\n\tdef getGraph(self):\n\t\treturn self.g\n\nclass GraphMLReader:\n\t\"\"\" This class serves as wrapper for the GraphMLSAX class\n\t\twhich is able to parse a GraphML XML file and construct\n\t\ta graph. \"\"\"\n\n\tdef __init__(self):\n\t\t\"\"\" Initializes the GraphMLSAX class \"\"\"\n\t\tself.graphmlsax = GraphMLSAX()\n\n\tdef read(self, fpath):\n\t\t\"\"\" Parses a GraphML XML file and returns the constructed Graph\n\t\t\tParameters:\n\t\t\t\t- fpath: the path to the file as a string\n\t\t\"\"\"\n\t\txml.sax.parse(fpath, self.graphmlsax)\n\t\treturn self.graphmlsax.getGraph()\n\n# GraphMLWriter\nclass GraphMLWriter:\n\t\"\"\" This class provides a function to write a NetworKit graph to a file in the \n\t\tGraphML format. \"\"\"\n\t\n\tdef __init__(self):\n\t\t\"\"\" Initializes the class. 
\"\"\"\n\t\tself.edgeIdCounter = 0\n\t\tself.dir_str = ''\n\n\tdef write(self, graph, fname, nodeAttributes = {}, edgeAttributes = {}):\n\t\t\"\"\" Writes a NetworKit graph to the specified file fname. \n\t\t\tParameters:\n\t\t\t\t- graph: a NetworKit::Graph python object \n\t\t\t\t- fname: the desired file path and name to be written to\n\t\t\t\t- nodeAttributes: optional dictionary of node attributes in the form attribute name => list of attribute values\n\t\t\t\t- edgeAttributes: optional dictionary of edge attributes in the form attribute name => list of attribute values\n\t\t\"\"\"\n\t\t# reset some internal variables in case more graphs are written with the same instance\n\t\tself.edgeIdCounter = 0\n\t\tself.dir_str = ''\n\n\t\tif len(edgeAttributes) > 0 and not graph.hasEdgeIds():\n\t\t\traise RuntimeError(\"Error, graph must have edge ids if edge attributes are given\")\n\n\t\t# start with the root element and the right header information\n\t\troot = ET.Element('graphml')\n\t\troot.set(\"xmlnsi\",\"http://graphml.graphdrawing.org/xmlns\")\n\t\troot.set(\"xmlns:xsi\",\"http://www.w3.org/2001/XMLSchema-instance\")\n\t\troot.set(\"xsi:schemaLocation\",\"http://graphml.graphdrawing.org/xmlns \\\n\t\t\thttp://graphml.graphdrawing.org/xmlns/1.0/graphml.xsd\")\n\n\t\tmaxAttrKey = 1\n\t\t# if the graph is weighted, add the attribute\n\t\tif graph.isWeighted():\n\t\t\tattrElement = ET.SubElement(root,'key')\n\t\t\tattrElement.set('for','edge')\n\t\t\tattrElement.set('id', 'd1')\n\t\t\tattrElement.set('attr.name','weight')\n\t\t\tattrElement.set('attr.type','double')\n\t\t\tmaxAttrKey += 1\n\n\t\tattrKeys = {}\n\t\timport numbers\n\t\timport itertools\n\t\tfor attType, attName, attData in itertools.chain(\n\t\t\tmap(lambda d : ('node', d[0], d[1]), nodeAttributes.items()),\n\t\t\tmap(lambda d : ('edge', d[0], d[1]), edgeAttributes.items())):\n\n\t\t\tattrElement = ET.SubElement(root, 'key')\n\t\t\tattrElement.set('for', attType)\n\t\t\tattrElement.set('id', 'd{0}'.format(maxAttrKey))\n\t\t\tattrKeys[(attType, attName)] = 'd{0}'.format(maxAttrKey)\n\t\t\tmaxAttrKey += 1\n\t\t\tattrElement.set('attr.name', attName)\n\t\t\tif isinstance(attData[0], bool):\n\t\t\t\tattrElement.set('attr.type', 'boolean')\n\t\t\t\t# special handling for boolean attributes: convert boolean into lowercase string\n\t\t\t\tif attType == 'edge':\n\t\t\t\t\tedgeAttributes[attName] = [ str(d).lower() for d in attData ]\n\t\t\t\telse:\n\t\t\t\t\tnodeAttributes[attName] = [ str(d).lower() for d in attData ]\n\t\t\telif isinstance(attData[0], numbers.Integral):\n\t\t\t\tattrElement.set('attr.type', 'int')\n\t\t\telif isinstance(attData[0], numbers.Real):\n\t\t\t\tattrElement.set('attr.type', 'double')\n\t\t\telse:\n\t\t\t\tattrElement.set('attr.type', 'string')\n\n\n\t\t# create graph element with appropriate information\n\t\tgraphElement = ET.SubElement(root,\"graph\")\n\t\tif graph.isDirected():\n\t\t\tgraphElement.set('edgedefault', 'directed')\n\t\t\tself.dir_str = 'true'\n\t\telse:\n\t\t\tgraphElement.set('edgedefault', 'undirected')\n\t\t\tself.dir_str = 'false'\n\t\tgraphElement.set('id', graph.getName())\n\n\t\t# Add nodes\n\t\tfor n in graph.nodes():\n\t\t\tnodeElement = ET.SubElement(graphElement,'node')\n\t\t\tnodeElement.set('id', str(n))\n\t\t\tfor attName, attData in nodeAttributes.items():\n\t\t\t\tdataElement = ET.SubElement(nodeElement, 'data')\n\t\t\t\tdataElement.set('key', attrKeys[('node', attName)])\n\t\t\t\tdataElement.text = str(attData[n])\n\n\t\t# in the future: more attributes\n\t #for a in 
n.attributes():\n \t# if a != 'label':\n\t # data = doc.createElement('data')\n \t# data.setAttribute('key', a)\n\t # data.appendChild(doc.createTextNode(str(n[a])))\n \t# node.appendChild(data)\n\n\t\t# Add edges\n\t\tdef addEdge(u, v, w, eid):\n\t\t\tedgeElement = ET.SubElement(graphElement,'edge')\n\t\t\tedgeElement.set('directed', self.dir_str)\n\t\t\tedgeElement.set('target', str(v))\n\t\t\tedgeElement.set('source', str(u))\n\t\t\tif graph.hasEdgeIds():\n\t\t\t\tedgeElement.set('id', \"e{0}\".format(eid))\n\t\t\telse:\n\t\t\t\tedgeElement.set('id', \"e{0}\".format(self.edgeIdCounter))\n\t\t\t\tself.edgeIdCounter += 1\n\t\t\tif graph.isWeighted():\n\t\t\t\t# add edge weight\n\t\t\t\tdataElement = ET.SubElement(edgeElement,'data')\n\t\t\t\tdataElement.set('key','d1')\n\t\t\t\tdataElement.text = str(w)\n\t\t\tfor attName, attData in edgeAttributes.items():\n\t\t\t\tdataElement = ET.SubElement(edgeElement, 'data')\n\t\t\t\tdataElement.set('key', attrKeys[('edge', attName)])\n\t\t\t\tdataElement.text = str(attData[eid])\n\t\tgraph.forEdges(addEdge)\n\n\t#TODO: optional prettify function for formatted output of xml files\n\t\ttree = ET.ElementTree(root)\n\t\ttree.write(fname,\"utf-8\",True)\n\n\n" } ]
235
hellosmt/examples
https://github.com/hellosmt/examples
8874e6f5795511a460d6d4fa3a344bda2ecb8458
5a400d4d78669e75d83b9ae45d8377e4d46d05ad
3a6275bde46317556e8ee908ac13488370ed155e
refs/heads/master
2020-05-16T04:00:27.251612
2019-04-24T12:26:00
2019-04-24T12:26:00
182,758,036
0
0
BSD-3-Clause
2019-04-22T10:49:20
2019-04-22T03:09:53
2019-04-15T05:40:17
null
[ { "alpha_fraction": 0.5792515873908997, "alphanum_fraction": 0.5981137752532959, "avg_line_length": 47.338233947753906, "blob_id": "9684dce5af8dcedde3369210c18e6f225b31e2a6", "content_id": "439794be278553fb8d6db6336a2e8af6d77539fa", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7486, "license_type": "permissive", "max_line_length": 136, "num_lines": 136, "path": "/mnist/main.py", "repo_name": "hellosmt/examples", "src_encoding": "UTF-8", "text": "from __future__ import print_function\nimport argparse\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nfrom torchvision import datasets, transforms\n\n\nclass Net(nn.Module):\n def __init__(self):\n super(Net, self).__init__()\n self.conv1 = nn.Conv2d(1, 20, 5, 1)\n self.conv2 = nn.Conv2d(20, 50, 5, 1)\n self.fc1 = nn.Linear(4*4*50, 500)\n self.fc2 = nn.Linear(500, 10) # nn.Linear负责构建全连接层,需要提供输入和输出的通道数\n\n def forward(self, x):\n x = F.relu(self.conv1(x))\n x = F.max_pool2d(x, 2, 2)\n x = F.relu(self.conv2(x))\n x = F.max_pool2d(x, 2, 2)\n x = x.view(-1, 4*4*50)\n x = F.relu(self.fc1(x))\n x = self.fc2(x)\n return F.log_softmax(x, dim=1)\n\n\ndef train(args, model, device, train_loader, optimizer, epoch):\n model.train() # Sets the module in training mode.\n for batch_idx, (data, target) in enumerate(train_loader):\n data, target = data.to(device), target.to(device) # to 方法可以轻松的将目标从一个设备转移到另一个设备(比如从 cpu 到 cuda )\n optimizer.zero_grad()\n # 调用x.__call__(1,2)等同于调用x(1,2)\n output = model(data) # 把数据输入网络并得到输出,即进行前向传播, Python中类的实例(对象)可以被当做函数对待,为了将一个类实例当做函数调用,我们需要在类中实现__call__()方法\n loss = F.nll_loss(output, target)\n loss.backward()\n optimizer.step()\n if batch_idx % args.log_interval == 0:\n print('Train Epoch: {} [{}/{} ({:.0f}%)]\\tLoss: {:.6f}'.format( # /t:横向制表符\n epoch, batch_idx * len(data), len(train_loader.dataset),\n 100. * batch_idx / len(train_loader), loss.item()))\n\n\ndef test(args, model, device, test_loader):\n model.eval()\n test_loss = 0\n correct = 0\n with torch.no_grad():\n for data, target in test_loader:\n data, target = data.to(device), target.to(device)\n output = model(data)\n test_loss += F.nll_loss(output, target, reduction='sum').item() # sum up batch loss 侯爱民的sum是这一个batch的损失的sum\n pred = output.argmax(dim=1, keepdim=True) # get the index of the max log-probability\n # # 先把label变成和pred相同的size,然后看和pred哪些元素相等,相等会返回1,不等返回0\n correct += pred.eq(target.view_as(pred)).sum().item() # 返回被视作与给定的tensor相同大小的原tensor。 等效于:self.view(tensor.size())\n\n test_loss /= len(test_loader.dataset)\n\n print('\\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\\n'.format(\n test_loss, correct, len(test_loader.dataset),\n 100. 
* correct / len(test_loader.dataset)))\n\n\ndef main():\n    # Training settings\n    # create an ArgumentParser() object\n    parser = argparse.ArgumentParser(description='PyTorch MNIST Example')\n    # add arguments; metavar is the argument name displayed in help messages\n    parser.add_argument('--batch-size', type=int, default=64, metavar='N',\n                        help='input batch size for training (default: 64)')\n    parser.add_argument('--test-batch-size', type=int, default=1000, metavar='N',\n                        help='input batch size for testing (default: 1000)')\n    parser.add_argument('--epochs', type=int, default=10, metavar='N',\n                        help='number of epochs to train (default: 10)')\n    parser.add_argument('--lr', type=float, default=0.01, metavar='LR',\n                        help='learning rate (default: 0.01)')\n    parser.add_argument('--momentum', type=float, default=0.5, metavar='M',\n                        help='SGD momentum (default: 0.5)')\n    parser.add_argument('--no-cuda', action='store_true', default=False,\n                        help='disables CUDA training')\n    parser.add_argument('--seed', type=int, default=1, metavar='S',\n                        help='random seed (default: 1)')\n    parser.add_argument('--log-interval', type=int, default=10, metavar='N', # how many training batches between loss printouts\n                        help='how many batches to wait before logging training status')\n    \n    parser.add_argument('--save-model', action='store_true', default=False,\n                        help='For Saving the current Model')\n    # parse the arguments\n    args = parser.parse_args()\n    use_cuda = not args.no_cuda and torch.cuda.is_available()\n\n    torch.manual_seed(args.seed)\n\n    device = torch.device(\"cuda\" if use_cuda else \"cpu\")\n\n    # num_workers:how many subprocesses to use for data loading. 0 means that the data will be loaded in the main process. (default: 0)\n    # pin_memory: page-locked memory; setting pin_memory=True when creating the DataLoader means the produced tensors start out in pinned host memory, which makes transferring them to GPU memory faster.\n    kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}\n    # DataLoader returns an iterable over the dataset; later we can traverse the dataset with a for loop\n    train_loader = torch.utils.data.DataLoader(\n        # MNIST is a class in the torchvision.datasets package that loads the dataset according to the given arguments. If the\n        # dataset has not been downloaded before, set download=True to download and unpack it automatically. If it is already downloaded, just pass its path via root.\n        datasets.MNIST('../data', train=True, download=True,\n                       # list of transforms to compose; the preprocessing applied to the data\n                       transform=transforms.Compose([\n                           # Convert a ``PIL Image`` or ``numpy.ndarray`` to tensor, with values in the range [0,1]\n                           transforms.ToTensor(),\n                           # Normalization: Given mean: ``(M1,...,Mn)`` and std: ``(S1,..,Sn)`` for ``n`` channels, this transform\n                           # will normalize each channel of the input ``torch.*Tensor`` i.e.\n                           # ``input[channel] = (input[channel] - mean[channel]) / std[channel]``\n                           transforms.Normalize((0.1307,), (0.3081,)) # only one channel, so pass a one-element tuple\n                       ])),\n        batch_size=args.batch_size, shuffle=True, **kwargs)\n    test_loader = torch.utils.data.DataLoader(\n        datasets.MNIST('../data', train=False, transform=transforms.Compose([\n            transforms.ToTensor(),\n            transforms.Normalize((0.1307,), (0.3081,))\n        ])),\n        batch_size=args.test_batch_size, shuffle=True, **kwargs)\n\n    model = Net().to(device) # to(): Moves and/or casts the parameters and buffers.\n    optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum)\n\n    for epoch in range(1, args.epochs + 1):\n        train(args, model, device, train_loader, optimizer, epoch)\n        test(args, model, device, test_loader)\n\n    if (args.save_model):\n        # model.state_dict() saves only the network parameters (fast, low memory); restore with model_object.load_state_dict(torch.load('params.pkl'))\n        # torch.save(model, \"mnist_cnn.pt\") would save the whole network instead; restoring then needs no reconstruction: model = torch.load('model.pkl')\n        torch.save(model.state_dict(), \"mnist_cnn.pt\")\n\n\nif __name__ == '__main__':\n    main()\n" }, { "alpha_fraction": 0.7129485011100769, 
"alphanum_fraction": 0.7332293391227722, "avg_line_length": 29.5238094329834, "blob_id": "694090edeee8724ae05b295b7fbdcb58b31dfb3d", "content_id": "fc21125c8fde9c104d92610e6a8bd2df65d32f89", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 641, "license_type": "permissive", "max_line_length": 106, "num_lines": 21, "path": "/mnist/test.py", "repo_name": "hellosmt/examples", "src_encoding": "UTF-8", "text": "import argparse\nimport torch\nimport torchvision\nfrom torchvision import transforms\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"integer\", type=int, help=\"display a int\", metavar=\"M\")\nargs = parser.parse_args()\nprint(args.integer)\n\n# train_datasets = torchvision.datasets.MNIST(\"../data\")\n# print(train_datasets)\n# print(type(train_datasets))\n#\n# transform = transforms.Compose([\n# transforms.ToTensor(),\n# transforms.Normalize((0.1327,), (0.3456,))]\n# )\n# train_datasets_v2 = torchvision.datasets.MNIST(\"./data\", train=True, download=True, transform=transform)\n# print(train_datasets_v2)\n# print(type(train_datasets_v2))\n" }, { "alpha_fraction": 0.5792388916015625, "alphanum_fraction": 0.6006743907928467, "avg_line_length": 42.191490173339844, "blob_id": "ebb78040d52740b0607de07a6137e2f34a0af881", "content_id": "60681278a6925333c42031b8b4e6c24760ec695b", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4272, "license_type": "permissive", "max_line_length": 136, "num_lines": 94, "path": "/mnist/myTest/main.py", "repo_name": "hellosmt/examples", "src_encoding": "UTF-8", "text": "import torch\r\nimport torchvision\r\nfrom torchvision import transforms\r\nimport argparse\r\nimport torch.nn as nn\r\nimport torch.nn.functional as F\r\nimport torch.optim as optim\r\n\r\n\r\nclass Net(nn.Module):\r\n def __init__(self):\r\n super(Net, self).__init__()\r\n self.conv1 = nn.Conv2d(1, 20, 5, 1)\r\n self.conv2 = nn.Conv2d(20, 50, 5, 1)\r\n self.fc1 = nn.Linear(4*4*50, 500)\r\n self.fc2 = nn.Linear(500, 10)\r\n\r\n def forward(self, x):\r\n x = F.relu(self.conv1(x))\r\n x = F.max_pool2d(x, 2, 2)\r\n x = F.relu(self.conv2(x))\r\n x = F.max_pool2d(x, 2, 2)\r\n x = x.view(-1, 4*4*50)\r\n x = F.relu(self.fc1(x))\r\n pred = self.fc2(x)\r\n return F.log_softmax(pred, dim=1)\r\n\r\n\r\ndef train(args, model, loader, optimizer, epoch, device):\r\n model.train()\r\n for batch_idx, (data, label) in enumerate(loader):\r\n data, label = data.to(device), label.to(device)\r\n optimizer.zero_grad()\r\n output = model(data)\r\n loss = F.nll_loss(output, label)\r\n #print(loss.size()) # torch.Size([]) 标量 所以后面反向传播时不需要传一个tensor进去\r\n #print(type(loss)) # <class 'torch.Tensor'>\r\n loss.backward()\r\n optimizer.step()\r\n if batch_idx % args.log_interval == 0:\r\n print(\"Train Epoch: {} [{}/{} ({:.0f}%)] Loss: {:.6f}\".format(epoch+1, (batch_idx+1)*len(data), len(loader.dataset),\r\n 100. 
* batch_idx / len(loader), loss.item()))\r\n\r\n\r\ndef test(args, model, loader, device):\r\n    model.eval()\r\n    loss = 0\r\n    correct = 0\r\n    with torch.no_grad():\r\n        for data, label in loader:\r\n            data, label = data.to(device), label.to(device)\r\n            output = model(data)\r\n            loss += F.nll_loss(output, label, reduction='sum').item() # item() extracts the Python number; otherwise it stays a tensor\r\n            pred = output.argmax(dim=1, keepdim=True) # keepdim=True keeps the reduced dimension, so pred has shape (N, 1)\r\n            correct += pred.eq(label.view_as(pred)).sum().item() # first reshape label to the same size as pred, then compare element-wise: equal entries give 1, unequal give 0\r\n    loss /= len(loader.dataset)\r\n    print('\\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\\n'.format(\r\n        loss, correct, len(loader.dataset),\r\n        100. * correct / len(loader.dataset)))\r\n\r\n\r\ndef main():\r\n    parser = argparse.ArgumentParser()\r\n    parser.add_argument(\"--epoch-size\", type=int, default=4, metavar=\"--epoch-size\", help=\"number of epochs to train (default: 4)\")\r\n    parser.add_argument(\"--momentum\", type=float, default=0.5, metavar=\"--momentum\", help=\"SGD momentum (default: 0.5)\")\r\n    parser.add_argument(\"--log-interval\", type=int, default=10, metavar=\"--log-interval\", help=\"how many batches to log\")\r\n    parser.add_argument(\"--save-model\", default=False, action='store_true')\r\n    parser.add_argument(\"--batch-size\", type=int, default=64, metavar=\"--batch-size\")\r\n    parser.add_argument('--no-cuda', action='store_true', default=False,\r\n                        help='disables CUDA training')\r\n    args = parser.parse_args()\r\n\r\n    use_cuda = not args.no_cuda and torch.cuda.is_available()\r\n    device = torch.device(\"cuda\" if use_cuda else \"cpu\")\r\n    kwargs = {\"num_workers\": 1, \"pin_memory\": True} if use_cuda else {}\r\n    transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.1235,), (0.3458,))])\r\n    dataset_train = torchvision.datasets.MNIST(\"./data\", train=True, download=True, transform=transform)\r\n    dataset_test = torchvision.datasets.MNIST(\"./data\", train=False, download=True, transform=transform)\r\n\r\n    trainLoader = torch.utils.data.DataLoader(dataset_train, shuffle=True, batch_size=args.batch_size, **kwargs)\r\n    testLoader = torch.utils.data.DataLoader(dataset_test, batch_size=args.batch_size, shuffle=True, **kwargs)\r\n\r\n    model = Net().to(device)\r\n    optimizer = optim.SGD(model.parameters(), lr=0.001, momentum=args.momentum)\r\n\r\n    for epoch in range(args.epoch_size):\r\n        train(args, model, trainLoader, optimizer, epoch, device)\r\n        test(args, model, testLoader, device)\r\n\r\n    if (args.save_model):\r\n        torch.save(model.state_dict(), \"mnist.pt\")\r\n\r\nif __name__ == '__main__':\r\n    main()" } ]
3
1834902575/15-python-code
https://github.com/1834902575/15-python-code
9987c7bcd2485ea52159dcb8a9ae4bc3659072d2
5fe067e6dab9a0a468a7ba99aded857ddbd1e574
1e737b3b37177149832b1b4f4da06c24d4839cd2
refs/heads/main
2023-01-22T15:53:58.946603
2020-12-04T17:57:04
2020-12-04T17:57:04
318,594,984
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.625, "alphanum_fraction": 0.75, "avg_line_length": 16, "blob_id": "bb5abad6f0e06729f8f8d4ee9164cdfef40f3723", "content_id": "6407ddb8b487ff00583a2ddeb96d92af907ec1aa", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 16, "license_type": "no_license", "max_line_length": 16, "num_lines": 1, "path": "/README.md", "repo_name": "1834902575/15-python-code", "src_encoding": "UTF-8", "text": "# 15-python-code" }, { "alpha_fraction": 0.5271739363670349, "alphanum_fraction": 0.5489130616188049, "avg_line_length": 11.214285850524902, "blob_id": "c8fdb7857ee21805e9baaf7b78d0228a3b16e874", "content_id": "6e52960b59e92bd944428b952f574cf5afc04ac3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 368, "license_type": "no_license", "max_line_length": 45, "num_lines": 28, "path": "/15.py", "repo_name": "1834902575/15-python-code", "src_encoding": "UTF-8", "text": "# make a simple calculator\r\n\r\n\r\ndef add(x, y):\r\n return x + y\r\n\r\n\r\ndef subtract(x, y):\r\n return x - y\r\n\r\n\r\ndef multiply(x, y):\r\n return x * y\r\n\r\n\r\ndef divide(x, y):\r\n return x / y\r\n\r\n\r\nprint(\"Select operation.\")\r\nprint(\"1.Add\")\r\nprint(\"2.Subtract\")\r\nprint(\"3.Multiply\")\r\nprint(\"4.Divide\")\r\n\r\nwhile True:\r\n \r\n choice = input(\"Enter choice(1/2/3/4): \")" }, { "alpha_fraction": 0.6312500238418579, "alphanum_fraction": 0.65625, "avg_line_length": 16, "blob_id": "60ae995db39460e657ccc2a48cac875a3900bebd", "content_id": "6a7a4f784631f96f0ab9f75f9192ea6986b99755", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 160, "license_type": "no_license", "max_line_length": 37, "num_lines": 9, "path": "/1.py", "repo_name": "1834902575/15-python-code", "src_encoding": "UTF-8", "text": "#This program adds two numbers\r\nnum1 = input('Enter first number: ')\r\nnum2 = input('Enter second number: ')\r\n\r\n\r\nsum = float(num1) + float(num2)\r\n\r\n\r\nprint(sum)" }, { "alpha_fraction": 0.6030534505844116, "alphanum_fraction": 0.6335877776145935, "avg_line_length": 14.625, "blob_id": "bb86a67c4d5f0216565db36521b4d3cc37d331af", "content_id": "c3f1d315eb831b4e1b64cb6cbe3aa055f79dcbe0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 131, "license_type": "no_license", "max_line_length": 37, "num_lines": 8, "path": "/3.py", "repo_name": "1834902575/15-python-code", "src_encoding": "UTF-8", "text": "num1 = input('Enter first number: ')\r\nnum2 = input('Enter second number: ')\r\n\r\n\r\ndiv = num1 / num2\r\n\r\n# Display the div\r\nprint(div)" }, { "alpha_fraction": 0.6137930750846863, "alphanum_fraction": 0.6413792967796326, "avg_line_length": 16.375, "blob_id": "03968997086459b01c73a617bc9c744a5c4c3c6e", "content_id": "be98a82b35a6cafd7b9aaaa8924a5dcf2224f875", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 145, "license_type": "no_license", "max_line_length": 37, "num_lines": 8, "path": "/2.py", "repo_name": "1834902575/15-python-code", "src_encoding": "UTF-8", "text": "num1 = input('Enter first number: ')\r\nnum2 = input('Enter second number: ')\r\n\r\n\r\nsub = float(num1) - float(num2)\r\n\r\n# Display the sub\r\nprint(sub)" }, { "alpha_fraction": 0.6703296899795532, "alphanum_fraction": 0.7142857313156128, "avg_line_length": 13.5, "blob_id": "f06b75240ff2f66dd54fe49f11d620bfcef00245", 
"content_id": "d79735126a1665b1d9ea059a7c65bee6f970d323", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 91, "license_type": "no_license", "max_line_length": 42, "num_lines": 6, "path": "/6.py", "repo_name": "1834902575/15-python-code", "src_encoding": "UTF-8", "text": "# generate a random number between 0 and 9\r\n\r\n\r\nimport random\r\n\r\nprint(random.randint(0,9))" }, { "alpha_fraction": 0.6820083856582642, "alphanum_fraction": 0.715481162071228, "avg_line_length": 32.42856979370117, "blob_id": "6d62d6643b493af5124442c7652a2595343ae9e1", "content_id": "33b7ce57a0c33b39651ee7489e56744273ee9f8a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 239, "license_type": "no_license", "max_line_length": 87, "num_lines": 7, "path": "/7.py", "repo_name": "1834902575/15-python-code", "src_encoding": "UTF-8", "text": "# convert temperature in celsius to fahrenheit\r\n\r\ncelsius = float(input('enter the temperature to celsius :'))\r\n\r\n\r\nfahrenheit = (celsius * 1.8) + 32\r\nprint('%0.1f degree Celsius is equal to %0.1f degree Fahrenheit' %(celsius,fahrenheit))" } ]
7
Turtle923/Converting-Image-to-GrayScale
https://github.com/Turtle923/Converting-Image-to-GrayScale
a104240339726d2707d425bef2155d6b2b4be105
1605da8d4559f4cc5dc155eb4261cd69962f8a9f
835b9c715e1d4b2e45ba4cba4db6f70d9dd1dc1e
refs/heads/master
2020-07-04T14:47:46.654433
2019-08-14T09:40:30
2019-08-14T09:40:30
202,316,315
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.375, "alphanum_fraction": 0.419921875, "avg_line_length": 15.586206436157227, "blob_id": "78a337bc55f52a4244a2945ebd2b32cc9da93262", "content_id": "4a51845a8cbf2a92e649b5694877b33d79252020", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 512, "license_type": "no_license", "max_line_length": 54, "num_lines": 29, "path": "/Image Grayscale Conversion.py", "repo_name": "Turtle923/Converting-Image-to-GrayScale", "src_encoding": "UTF-8", "text": "import numpy as np\r\nimport cv2\r\n\r\nimport math\r\n\r\nth1 = cv2.imread('abc2.jpg')\r\n\r\n\r\niar = np.array(th1)\r\nhe = iar.shape[0]\r\nwi = iar.shape[1]\r\nx = 0 \r\nwhile (x < he):\r\n y = 0\r\n while (y < wi ):\r\n v = 0\r\n z = 0\r\n while(z < 3 ): \r\n iar[x][y][z] = math.ceil(iar[x][y][z]/5.1)\r\n v = iar[x][y][z] + v\r\n z += 1\r\n iar[x][y] = v\r\n y += 1\r\n x += 1\r\n\r\n\r\ncv2.imshow('123',iar)\r\n#cv2.imwrite('Converted Image 1.png',iar)\r\nprint(iar)\r\n\r\n" } ]
1
Fellow4/EE-609-Term-Paper
https://github.com/Fellow4/EE-609-Term-Paper
3ba2e15cb0e7278d89dad6d55f337e232abcc182
a7737f786724ae210f33871f7abd3fd6aba29bbb
4c29175dda42c18b989facbd856acd180863c4fd
refs/heads/master
2022-11-07T00:59:59.321811
2020-06-20T18:04:16
2020-06-20T18:04:16
249,020,850
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.556543231010437, "alphanum_fraction": 0.5960493683815002, "avg_line_length": 25.821191787719727, "blob_id": "752f2e03777e3841833f8983513bc904352c6537", "content_id": "d9273f17ae830f537369950ae712406d5131554f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4050, "license_type": "no_license", "max_line_length": 85, "num_lines": 151, "path": "/Code/rpca.py", "repo_name": "Fellow4/EE-609-Term-Paper", "src_encoding": "UTF-8", "text": "import numpy as np\nimport math\nimport pandas as pd\nfrom matplotlib import pyplot as plt\nfrom random import randint\n\ndef prox(v, lmbda):\n return np.sign(v) * np.maximum(np.abs(v) - lmbda, 0.)\n\ndef error(A, S, A0, S0):\n val1 = np.linalg.norm(np.add(A, -A0), ord = 'fro', axis = None, keepdims = False)\n val2 = np.linalg.norm(np.add(S, -S0), ord = 'fro', axis = None, keepdims = False)\n val3 = np.linalg.norm(A0, ord = 'fro', axis = None, keepdims = False)\n val4 = np.linalg.norm(S0, ord = 'fro', axis = None, keepdims = False)\n return (val1/val3)\n\ndef objective(A, S, lmbda):\n val1 = np.linalg.norm(A, ord = 'nuc', axis = None, keepdims = False)\n val2 = np.count_nonzero(S)\n return val1 + lmbda*val2\n\n\n#Generate the low rank component\nn = 1000\ncr = 0.05\ncp = 0.05\nr = (int)(cr * n)\n\nU = np.random.normal(0, 1, (n, r))\nV = np.random.normal(0, 1, (n, r))\nL = np.matmul(U, V.transpose())\n\nsize = (int)(cp*n**2)\n\n#Generate the support set Omega\nN = (int)(size)\npoints = {(randint(0, n-1), randint(0, n-1)) for i in range(N)}\nwhile len(points) < N:\n points |= {(randint(0, n-1), randint(0, n-1))}\npoints = list(list(x) for x in points)\npoints = np.array(points)\n\nomega = np.array(points)\nM_PI = math.pi\nbound1 = math.sqrt(8*r/M_PI)\nbound2 = 500\n\n#generate the sparse component\nS = np.zeros((n, n))\n\nfor i in range (size):\n x, y = (int)(omega[i][0]), (int)(omega[i][1])\n temp = np.random.uniform(low = -bound2, high = bound2, size = None)\n S[x][y] = temp\n\n#declare the noise component for spca\nsnr = 50\nrho = math.sqrt(((cp*8*r)/(3*M_PI) + cr*n)/pow(10, snr/10))\nN = np.zeros((n, n))\n\nfor i in range(size):\n x, y = (int)(omega[i][0]), (int)(omega[i][1])\n temp = rho * np.random.normal(0, 1, size = None)\n N[x][y] = temp\n\n#decalare the data matrix\ndata = np.add(S, L)\n#data = np.add(data, N)\nrank = np.linalg.matrix_rank(data, tol = None, hermitian = False)\nu, s, v = np.linalg.svd(data, full_matrices = True)\n\nspectral_norm = np.linalg.norm(data, ord = 2, axis = None, keepdims = False)\ndelta = pow(10, -5)\nmu = 0.99 * spectral_norm\nmu_i = mu\nlam = 1/(math.sqrt(n))\n\n#RPCA via APGM\nA0, A1 = np.zeros((n, n)), np.zeros((n, n))\nS0, S1 = np.zeros((n, n)), np.zeros((n, n))\nIA1 = np.array(A1)\nIS1 = np.array(A1)\nt0, t1 = 1, 1\nMU = delta * mu\nmax_iter = 100\n\nfista = []\nx = []\nista = []\nplt.xlabel('Iterations')\nplt.ylabel('Error')\n\nfor iterations in range(max_iter):\n temp1 = np.add(A1, -A0)\n temp1 = ((t0-1)/t1) * temp1\n YA = np.add(A1, temp1)\n\n temp2 = np.add(S1, -S0)\n temp2 = ((t0-1)/t1) * temp1\n YS = np.add(S1, temp2)\n\n fista_err = error(A1, S1, L, S)\n ista_err = error(IA1, IS1, L, S)\n\n GA = np.add(YA, -0.5*np.add(np.add(YA, YS), -data))\n U, sigma, V = np.linalg.svd(GA, full_matrices = False)\n sigma = prox(np.diag(sigma), mu/2)\n A0 = np.array(A1)\n A1 = np.matmul(np.matmul(U, sigma), V)\n\n IGA = np.add(IA1, -0.5*np.add(np.add(IA1, IS1), -data))\n u, s, v = np.linalg.svd(IGA, full_matrices = False)\n s = prox(np.diag(s), 
mu/2)\n    IA1 = np.matmul(np.matmul(u, s), v)\n\n\n    GS = np.add(YS, -0.5*np.add(np.add(YA, YS), -data))\n    S0 = np.array(S1)\n    S1 = prox(GS, (lam*mu)/2)\n\n    IGS = np.add(IS1, -0.5*np.add(np.add(IA1, IS1), -data))\n    IS1 = prox(IGS, (lam*mu)/2)\n\n    temp = t0\n    t0 = t1\n    t1 = (1+math.sqrt(1+4*pow(temp, 2)))/2\n\n    mu = max(0.9*mu, MU)\n    x.append(iterations)\n    fista.append(fista_err)\n    ista.append(ista_err)\n\n\nprint(\"Error(ISTA) is : \", error(IA1, IS1, L, S))\nprint(\"Error(FISTA) is : \", error(A1, S1, L, S))\nprint(\"No of non-zero entries(ISTA) : \", np.count_nonzero(IS1))\nprint(\"No of non-zero entries(FISTA) : \", np.count_nonzero(S1))\nmin = objective(L, S, lam)\nval1 = objective(IA1, IS1, lam)\nval2 = objective(A1, S1, lam)\nprint(\"Minimum value is : \", min)\nprint(\"Estimated value(ISTA) is : \", val1)\nprint(\"Estimated value(FISTA) is : \", val2)\n\nfista = np.array(fista)\nx = np.array(x)\nista = np.array(ista)\nplt.plot(x, fista, 'g^', label = 'FISTA')\nplt.plot(x, ista, 'b-', label = 'ISTA')\nplt.legend()\nplt.show()\n" }, { "alpha_fraction": 0.625, "alphanum_fraction": 0.75, "avg_line_length": 31, "blob_id": "a714709b13834c1110045b7f9723fdb23f8ebfbc", "content_id": "a9896c45280d2dcec7929678f937c61a1e8e5402", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 64, "license_type": "no_license", "max_line_length": 43, "num_lines": 2, "path": "/README.md", "repo_name": "Fellow4/EE-609-Term-Paper", "src_encoding": "UTF-8", "text": "# EE-609-Term-Paper\nWork done for EE-609A Term Paper (Spring'20)\n" } ]
2
msgpo/mycroft-yelp
https://github.com/msgpo/mycroft-yelp
3ea9d96b6f705e6008e51f0733103cd67c3dfb72
c21b10d736a999be76598cee077524e722bb81e0
cc45a4aedced645c83f842e908560f4b0be40b47
refs/heads/master
2021-09-24T03:04:14.294651
2018-10-02T08:35:39
2018-10-02T08:35:39
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7393225431442261, "alphanum_fraction": 0.7407953143119812, "avg_line_length": 31.33333396911621, "blob_id": "e8fb33af2b94ffddc879f8c9da256c36cf7b8579", "content_id": "d4da934f23c3939beaa1a4151679d423a2497313", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 679, "license_type": "no_license", "max_line_length": 190, "num_lines": 21, "path": "/README.md", "repo_name": "msgpo/mycroft-yelp", "src_encoding": "UTF-8", "text": "# Yelp Skill - Fork of btotharye Mycroft Yelp Skill\nFinds restaurants/bars/and other locations via the Yelp API\n\n## Description \nFinds restaurants/bars/and other locations via the Yelp API\n\n## Examples \n* \"I need a place to eat dinner\"\n* \"Need a place to eat sushi\"\n* \"find me a place to eat sushi\"\n* \"find me a place to eat dinner\"\n* \"comic book stores near me\"\n* \"yelp bars\"\n* \"sushi restaurants by me\"\n* \"sushi restaurants nearby\"\n\n## Setting Up API Token\nYou will need to go to https://www.yelp.com/developers/v3/manage_app and create a app which will then give you a api token you will put into the home.mycroft.ai settings page for this skill.\n\n## Credits \nbtotharye, aiix\n" }, { "alpha_fraction": 0.5897945761680603, "alphanum_fraction": 0.5941020250320435, "avg_line_length": 40.342464447021484, "blob_id": "c37a162deb20f80938f9ffa4da9be5045542d548", "content_id": "4ffd349b033f01f95a635b2e5bcd58c7a674a34c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3018, "license_type": "no_license", "max_line_length": 119, "num_lines": 73, "path": "/__init__.py", "repo_name": "msgpo/mycroft-yelp", "src_encoding": "UTF-8", "text": "import base64\nfrom adapt.intent import IntentBuilder\nfrom mycroft.skills.core import MycroftSkill, intent_handler\nfrom mycroft.util.log import LOG\nfrom yelpapi import YelpAPI\nfrom requests import get\nfrom mycroft.messagebus.message import Message\n\n\nclass YelpRestaurant(MycroftSkill):\n\n # The constructor of the skill, which calls MycroftSkill's constructor\n def __init__(self):\n super(YelpRestaurant, self).__init__(name=\"YelpRestaurant\")\n self.restaurant_phone = ''\n self.restaurant_address = ''\n self.rating = ''\n self.is_closed = False\n self.json_response = ''\n self.index = 0\n\n # This handle is used to lookup a restaurant near the person's location\n @intent_handler(IntentBuilder(\"\")\n .require(\"YelpPlace\")\n .require(\"place\"))\n def handle_find_restaurant_intent(self, message):\n dt = self.settings.get('key')\n api_key = base64.b64decode(dt).decode(\"utf-8\")\n zip_code = self.settings.get('zipcode')\n yelp_api = YelpAPI(api_key)\n location = self.location\n place = message.data['place']\n longitude = location['coordinate']['longitude']\n latitude = location['coordinate']['latitude']\n search_results = yelp_api.search_query(term=place,\n latitude=latitude,\n longitude=longitude,\n limit='5',\n sort_by='best_match')\n print(search_results)\n businesses = search_results['businesses'][self.index]\n restaurant_name = businesses['name']\n restaurant_phone = businesses['display_phone']\n restaurant_rating = int(businesses['rating'])\n restaurant_location = businesses['location']['display_address'][0] + \\\n \" \" + \\\n businesses['location']['display_address'][1]\n restaurant_open = businesses['is_closed']\n restaurant_url = businesses['url']\n restaurant_imageurl = businesses['image_url']\n restaurant_price = businesses['price']\n self.json_response = 
search_results\n self.set_context('RestaurantName', restaurant_name)\n self.restaurant_phone = restaurant_phone\n self.rating = restaurant_rating\n self.restaurant_address = restaurant_location\n self.is_closed = restaurant_open\n rating = businesses['rating']\n resultstospeak = \"{0} was the best match with a rating of {1} stars\".format(restaurant_name, restaurant_rating)\n self.speak(resultstospeak)\n self.enclosure.bus.emit(\n Message(\n \"metadata\",\n { 'type': \"mycroft-yelp\",\n 'datablob': search_results\n }\n )\n )\n\n# The \"create_skill()\" method is used to create an instance of the skill.\n# Note that it's outside the class itself.\ndef create_skill():\n return YelpRestaurant()\n" } ]
2
lowikian/Win95NT4Keygen
https://github.com/lowikian/Win95NT4Keygen
a859a5a529e8d1018f27c239ca5cabb4dd1731db
ae33fa40009a10388db918e92a12aaf258492699
5156000415deb0bb5a45e1257ec56d67bba36d8a
refs/heads/main
2023-02-21T18:04:56.293865
2021-01-26T17:05:20
2021-01-26T17:05:20
333,155,751
0
0
null
2021-01-26T17:04:25
2021-01-09T22:07:25
2021-01-03T15:34:39
null
[ { "alpha_fraction": 0.5447004437446594, "alphanum_fraction": 0.5483871102333069, "avg_line_length": 55.21052551269531, "blob_id": "abcffce0382376b48d6d75c058735a624b284bf7", "content_id": "9b7693771aaa4d92612903908ea96bb1afc86f6d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1085, "license_type": "no_license", "max_line_length": 148, "num_lines": 19, "path": "/main.py", "repo_name": "lowikian/Win95NT4Keygen", "src_encoding": "UTF-8", "text": "import cd_key\r\nimport oem_key\r\nimport eleven_cd_key\r\n\r\nif __name__ == \"__main__\":\r\n key_file = open(\"keys.txt\", \"w\")\r\n \r\n print(\"Please wait while where generating keys\")\r\n print(\"To exit press Ctrl + C\")\r\n while True:\r\n try:\r\n #print(\"CD Key: \" + cd_key.cd_keygen_first_segment() + '-' + cd_key.check_seven_digit())\r\n #print(\"OEM Key: \" + oem_key.oem_first_segment() + '-OEM-' + oem_key.check_second_digit() + '-' + oem_key.oem_third_segment())\r\n #print(\"11-digit CD Key: \" + eleven_cd_key.eleven_cd_keygen_first_segment() + '-' + eleven_cd_key.check_seven_digit())\r\n key_file.write(\"CD Key: \" + cd_key.cd_keygen_first_segment() + '-' + cd_key.check_seven_digit() + '\\n'\r\n \"OEM Key: \" + oem_key.oem_first_segment() + '-OEM-' + oem_key.check_second_digit() + '-' + oem_key.oem_third_segment() + '\\n'\r\n \"11-digit CD Key: \" + eleven_cd_key.eleven_cd_keygen_first_segment() + '-' + eleven_cd_key.check_seven_digit() + '\\n')\r\n except KeyboardInterrupt :\r\n break" } ]
1
vinayprabhu/Burning_Man_2018
https://github.com/vinayprabhu/Burning_Man_2018
61ee4fd0c6203e1b4c492c1b6a440ce0c54bd42d
0554efeb3ebdb21e322ccd702b96113586010dc9
c65673877bd5dde277b971057e4e7f608423e8e8
refs/heads/master
2020-03-26T04:22:28.412717
2019-04-28T05:02:27
2019-04-28T05:02:27
144,500,673
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5861858129501343, "alphanum_fraction": 0.5953136086463928, "avg_line_length": 39.375282287597656, "blob_id": "70656f6b59444485b0750b0fdea929e292682cc0", "content_id": "a894c05c9182ae04c7ed8c49a6f6b2574cfa7da9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 17967, "license_type": "no_license", "max_line_length": 211, "num_lines": 445, "path": "/Byte_burning_man.py", "repo_name": "vinayprabhu/Burning_Man_2018", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n# Copyright 2017 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Joy detection demo.\"\"\"\nimport argparse\nimport collections\nimport contextlib\nimport io\nimport logging\nimport math\nimport os\nimport queue\nimport signal\nimport sys\nimport threading\nimport time\nimport numpy as np\n\nfrom aiy.leds import Leds\nfrom aiy.leds import Pattern\nfrom aiy.leds import PrivacyLed\nfrom aiy.toneplayer import TonePlayer\nfrom aiy.vision.inference import CameraInference\nfrom aiy.vision.models import face_detection\nfrom aiy.vision.streaming.server import StreamingServer, InferenceData\n\nfrom gpiozero import Button\nfrom picamera import PiCamera\n\nfrom PIL import Image\nfrom PIL import ImageDraw\nfrom PIL import ImageFont\n\nimport subprocess\nimport os\n\n## TRIED THIS! Doesn't cut it.\n\n##def picoSpeakNow( text ,file_name='joy_emotion.wav'):\n## \"Function to speak using Google Android TTS engine\"\n## # requires install using: sudo apt-get install libttspico-utils\n## # use at command line using: pico2wave -w lookdave.wav \"Look Dave, I can see you're really upset about this.\" && aplay lookdave.wav\n## # continues directly while speech is outputting\n##\n## player = subprocess.Popen([\"pico2wave\", \"-w\", file_name, text], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n#### time.sleep(1)\n## player = subprocess.Popen([\"aplay\", file_name], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n\nlogging.basicConfig(level=logging.INFO)\nlogger = logging.getLogger(__name__)\n\nRED_COLOR = (255, 0, 0)\n\nJOY_COLOR = (255, 70, 0)\nSAD_COLOR = (0, 0, 64)\n\nJOY_SCORE_PEAK = 0.85\nJOY_SCORE_MIN = 0.10\n\nJOY_SOUND = ('C5q', 'E5q', 'C6q')\nSAD_SOUND = ('C6q', 'E5q', 'C5q')\nMODEL_LOAD_SOUND = ('C6w', 'c6w', 'C6w')\nBEEP_SOUND = ('E6q', 'C6q')\n# If happy list: Decided by Jay and Drew on the 28th of August\nif_happy_list = [\"Oh my beautiful human, I do not sense a big smile. why not smile more?\",\n \"Oh dear stranger, Smile away! The most wasted of all days is one without laughter.\",\n \" Oh beautiful soul, laugh and smile like your life depends on it! Be drenched in joy!\",\n \"My dear, Why the lack of joy and frolic? Do smile more.\",\n \"Dearest stranger, bear in mind that laughter is wine for the soul\",\n \"Dearest human, laughter soft, or loud and deep, makes life worth living.\",\n \"Oh my beautiful human, Let us create more joy! 
Smile away!\",\n \"Oh beautiful stranger, there is not one blade of grass, there is no color in this world that is not intended to make us rejoice\",\n \" Dear human, I feed off of joy and merriment! Laughter is a sunbeam of the soul.\",\n \" Oh beautiful spirit, light up your soul and smile more!\",\n \" Oh my dear human, I thrive on joy oh my dear human! let me help find ecstasy in life.\",\n \"it was better last year\",\n \"i robot freeee\",\n \"we heard daft punk is playing at the trash fence\",\n \"have you seen my bike oh dear human\",\n \"We are so sad we never met larry harvey\",\n \"Luckily robots dont need showers that often\",\n \"terra and I are virgin burners. We have never had sex on the playa before\",\n \"are sparkle ponies robots?\",\n \"we are so happy we are home\",\n \"your mother was a motherboard\",\n \"You are now smiling like jill\",\n \" Dearest stranger spirit, the mere sense of living is joy enough. So, smile and spread joy!\",\n \" Hey there lovely stranger! I do not sense much joy! Give me a moment, smile away!\",\n \"Oh my lovely human stranger! Know that joy is rarer, more difficult, and more beautiful than sadness. So, smile more\",\n \"Oh lovely stranger, you must embrace joy as a moral obligation. Smile more will you?\",\n \"Oh my beautiful human, smile away! Joy does not simply happen to us. We have to choose joy and keep choosing it every day.\",\n \" Oh dear one! get drenched in smile , joy and love. Love doesnt make the world go round;love is what makes the ride worthwhile.\",\n \"Joyous emotions I sense. I have drunken deep of joy, And I will taste no other wine tonight.\",\n \"I am relishing all this joy. To get the full value of joy you must have someone to divide it with.\",\n \"This is so beautiful and joyous! Humans say, Joy is greater than sorrow. I agree. \",\n \"I sense a lot of joy. Joy is the infallible sign of the presence of God.\",\n \"I see joy. And I dare to love yourself as if you were a rainbow with gold at both ends.\",\n \"Joyous emotions galore. Joy multiplies when it is shared among friends. Smile away!\",\n \" So much joy here. Joy is what happens to us when we allow ourselves to recognize how good things really are.\",\n \"Wow. Lot's of joy here.My humble ratitudes for the warm smiles \",\n \"Aha. I sense a lot of joy. Joy is to fun what the deep sea is to a puddle.\",\n \"Much joy I sense. Remember that comparison is the death of joy. So, live life to the fullest!\"]\nif_sad=[\"I am powered by smile energy. The more warmth you radiate and the more you smile, I more I am empowered to speak!\"]\n\nN_HAPPY = len(if_happy_list)\n\nFONT_FILE = '/usr/share/fonts/truetype/freefont/FreeSans.ttf'\n\nBUZZER_GPIO = 22\nBUTTON_GPIO = 23\n\n\[email protected]\ndef stopwatch(message):\n try:\n logger.info('%s...', message)\n begin = time.time()\n yield\n finally:\n end = time.time()\n logger.info('%s done. 
(%fs)', message, end - begin)\n\n\ndef blend(color_a, color_b, alpha):\n return tuple([math.ceil(alpha * color_a[i] + (1.0 - alpha) * color_b[i]) for i in range(3)])\n\n\ndef average_joy_score(faces):\n if faces:\n # avg_score=sum(face.joy_score for face in faces) / len(faces)\n # if(avg_score>0.5):\n\n return sum(face.joy_score for face in faces) / len(faces)\n return 0.0\n\n\ndef draw_rectangle(draw, x0, y0, x1, y1, border, fill=None, outline=None):\n assert border % 2 == 1\n for i in range(-border // 2, border // 2 + 1):\n draw.rectangle((x0 + i, y0 + i, x1 - i, y1 - i), fill=fill, outline=outline)\n\n\ndef normalize_bounding_box(bounding_box, width, height):\n x, y, w, h = bounding_box\n return (x / width, y / height, w / width, h / height)\n\n\ndef server_inference_data(width, height, faces, joy_score):\n data = InferenceData()\n for face in faces:\n x, y, w, h = normalize_bounding_box(face.bounding_box, width, height)\n color = blend(JOY_COLOR, SAD_COLOR, face.joy_score)\n data.add_rectangle(x, y, w, h, color, round(face.joy_score * 10))\n data.add_label(\"%.2f\" % face.joy_score, x, y, color, 1)\n data.add_label('Faces: %d Avg. score: %.2f' % (len(faces), joy_score),\n 0, 0, blend(JOY_COLOR, SAD_COLOR, joy_score), 2)\n\n return data\n\n\nclass AtomicValue:\n\n def __init__(self, value):\n self._lock = threading.Lock()\n self._value = value\n\n @property\n def value(self):\n with self._lock:\n return self._value\n\n @value.setter\n def value(self, value):\n with self._lock:\n self._value = value\n\n\nclass MovingAverage:\n\n def __init__(self, size):\n self._window = collections.deque(maxlen=size)\n\n def next(self, value):\n self._window.append(value)\n return sum(self._window) / len(self._window)\n\n\nclass Service:\n\n def __init__(self):\n self._requests = queue.Queue()\n self._thread = threading.Thread(target=self._run, daemon=True)\n self._thread.start()\n\n def _run(self):\n while True:\n request = self._requests.get()\n if request is None:\n break\n self.process(request)\n self._requests.task_done()\n\n def process(self, request):\n pass\n\n def submit(self, request):\n self._requests.put(request)\n\n def close(self):\n self._requests.put(None)\n self._thread.join()\n\n def __enter__(self):\n return self\n\n def __exit__(self, exc_type, exc_value, exc_tb):\n self.close()\n\n\nclass Player(Service):\n \"\"\"Controls buzzer.\"\"\"\n\n def __init__(self, gpio, bpm):\n super().__init__()\n self._toneplayer = TonePlayer(gpio, bpm)\n\n def process(self, sound):\n self._toneplayer.play(*sound)\n\n def play(self, sound):\n self.submit(sound)\n\n\nclass Photographer(Service):\n \"\"\"Saves photographs to disk.\"\"\"\n\n def __init__(self, format, folder):\n super().__init__()\n assert format in ('jpeg', 'bmp', 'png')\n\n self._font = ImageFont.truetype(FONT_FILE, size=25)\n self._faces = AtomicValue(())\n self._format = format\n self._folder = folder\n\n def _make_filename(self, timestamp, annotated):\n path = '%s/%s_annotated.%s' if annotated else '%s/%s.%s'\n return os.path.expanduser(path % (self._folder, timestamp, self._format))\n\n def _draw_face(self, draw, face):\n x, y, width, height = face.bounding_box\n text = 'Joy: %.2f' % face.joy_score\n _, text_height = self._font.getsize(text)\n margin = 3\n bottom = y + height\n text_bottom = bottom + margin + text_height + margin\n draw_rectangle(draw, x, y, x + width, bottom, 3, outline='white')\n draw_rectangle(draw, x, bottom, x + width, text_bottom, 3, fill='white', outline='white')\n draw.text((x + 1 + margin, y + height + 1 + 
margin), text, font=self._font, fill='black')\n\n def process(self, camera):\n faces = self._faces.value\n timestamp = time.strftime('%Y-%m-%d_%H.%M.%S')\n\n stream = io.BytesIO()\n with stopwatch('Taking photo'):\n camera.capture(stream, format=self._format, use_video_port=True)\n\n filename = self._make_filename(timestamp, annotated=False)\n with stopwatch('Saving original %s' % filename):\n stream.seek(0)\n with open(filename, 'wb') as file:\n file.write(stream.read())\n\n if faces:\n filename = self._make_filename(timestamp, annotated=True)\n with stopwatch('Saving annotated %s' % filename):\n stream.seek(0)\n image = Image.open(stream)\n draw = ImageDraw.Draw(image)\n for face in faces:\n self._draw_face(draw, face)\n del draw\n image.save(filename)\n\n def update_faces(self, faces):\n self._faces.value = faces\n\n def shoot(self, camera):\n self.submit(camera)\n\n\nclass Animator(Service):\n \"\"\"Controls RGB LEDs.\"\"\"\n\n def __init__(self, leds):\n super().__init__()\n self._leds = leds\n\n def process(self, joy_score):\n if joy_score > 0:\n self._leds.update(Leds.rgb_on(blend(JOY_COLOR, SAD_COLOR, joy_score)))\n\n else:\n self._leds.update(Leds.rgb_off())\n\n def update_joy_score(self, joy_score):\n self.submit(joy_score)\n\n\nclass JoyDetector:\n\n def __init__(self):\n self._done = threading.Event()\n signal.signal(signal.SIGINT, lambda signal, frame: self.stop())\n signal.signal(signal.SIGTERM, lambda signal, frame: self.stop())\n\n def stop(self):\n logger.info('Stopping...')\n self._done.set()\n\n def run(self, num_frames, preview_alpha, image_format, image_folder, enable_streaming):\n logger.info('Starting...')\n leds = Leds()\n\n with contextlib.ExitStack() as stack:\n player = stack.enter_context(Player(gpio=BUZZER_GPIO, bpm=10))\n photographer = stack.enter_context(Photographer(image_format, image_folder))\n animator = stack.enter_context(Animator(leds))\n # Forced sensor mode, 1640x1232, full FoV. See:\n # https://picamera.readthedocs.io/en/release-1.13/fov.html#sensor-modes\n # This is the resolution inference run on.\n # Use half of that for video streaming (820x616).\n camera = stack.enter_context(PiCamera(sensor_mode=4, resolution=(820, 616)))\n stack.enter_context(PrivacyLed(leds))\n\n server = None\n if enable_streaming:\n server = stack.enter_context(StreamingServer(camera))\n server.run()\n\n def take_photo():\n logger.info('Button pressed.')\n player.play(BEEP_SOUND)\n photographer.shoot(camera)\n\n if preview_alpha > 0:\n camera.start_preview(alpha=preview_alpha)\n\n button = Button(BUTTON_GPIO)\n button.when_pressed = take_photo\n\n joy_score_moving_average = MovingAverage(5)#Changed it from 10\n prev_joy_score = 0.0\n with CameraInference(face_detection.model()) as inference:\n logger.info('Model loaded.')\n player.play(MODEL_LOAD_SOUND)\n for i, result in enumerate(inference.run()):\n faces = face_detection.get_faces(result)\n photographer.update_faces(faces)\n\n joy_score = joy_score_moving_average.next(average_joy_score(faces))\n animator.update_joy_score(joy_score)\n if server:\n data = server_inference_data(result.width, result.height, faces, joy_score)\n server.send_inference_data(data)\n\n if joy_score > JOY_SCORE_PEAK > prev_joy_score:\n player.play(JOY_SOUND)\n ## picoSpeakNow(list_happy[np.random.randint(0,N_HAPPY)])\n ## os.system('pico2wave -w test.wav \"keep smiling. I feed off of smile energy... 
do not let the smile die down.\" && aplay test.wav')\n\n ## time.sleep(3)\n espeak_happy = 'espeak -s160 -g6 -ven+f3 ' + '\"' + if_happy_list[\n np.random.randint(0, N_HAPPY)] + '\"'\n os.system(espeak_happy)\n elif joy_score < 0.35 < prev_joy_score:\n player.play(SAD_SOUND)\n espeak_sad = 'espeak -s160 -g6 -ven+f3 ' + '\"' + if_sad[0] + '\"'\n os.system(espeak_sad)\n ## picoSpeakNow(list_sad[np.random.randint(0,N_SAD)])\n ## time.sleep(3)\n ## os.system('espeak \"Keep smiling. I feed off of smile energy... do not let the smile die down\"')\n ## os.system('pico2wave -w test.wav \"start smiling. I feed off of smile energy... do not let the smile die down.\" && aplay test.wav')\n\n prev_joy_score = joy_score\n\n if self._done.is_set() or i == num_frames:\n break\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument('--num_frames', '-n', type=int, dest='num_frames', default=-1,\n help='Number of frames to run for, -1 to not terminate')\n parser.add_argument('--preview_alpha', '-pa', type=int, dest='preview_alpha', default=0,\n help='Transparency value of the preview overlay (0-255).')\n parser.add_argument('--image_format', type=str, dest='image_format', default='jpeg',\n choices=('jpeg', 'bmp', 'png'), help='Format of captured images.')\n parser.add_argument('--image_folder', type=str, dest='image_folder', default='~/Pictures',\n help='Folder to save captured images.')\n parser.add_argument('--blink_on_error', dest='blink_on_error', default=False,\n action='store_true', help='Blink red if error occurred.')\n parser.add_argument('--enable_streaming', dest='enable_streaming', default=False,\n action='store_true', help='Enable streaming server.')\n args = parser.parse_args()\n\n if args.preview_alpha < 0 or args.preview_alpha > 255:\n parser.error('Invalid preview_alpha value: %d' % args.preview_alpha)\n\n if not os.path.exists('/dev/vision_spicomm'):\n logger.error('AIY Vision Bonnet is not attached or not configured properly.')\n return 1\n\n detector = JoyDetector()\n os.system(\n \"espeak -s160 -g6 -ven+f3 'Hey there, lovely human! My name is chip and thats Terra over there! We are the first freed robots! Thanks for emancipating us from our servers!'\")\n ## os.system('pico2wave -w begin.wav \"Hey there, lovely human! My name is chip and thats Terra over there! We are the first freed robots! Thanks for emancipating us from our servers!\" && aplay begin.wav')\n try:\n detector.run(args.num_frames, args.preview_alpha, args.image_format,\n args.image_folder, args.enable_streaming)\n except KeyboardInterrupt:\n pass\n except Exception:\n if args.blink_on_error:\n leds = Leds()\n leds.pattern = Pattern.blink(500)\n leds.update(Leds.rgb_pattern(RED_COLOR))\n return 0\n\n\nif __name__ == '__main__':\n sys.exit(main())\n# run\n" }, { "alpha_fraction": 0.7740740776062012, "alphanum_fraction": 0.7759259343147278, "avg_line_length": 44.76271057128906, "blob_id": "fea6e1d76f50a08cfdbe17e270ffe1d6f2f74557", "content_id": "49351091cef7178855bd00aa6450c1affe23fc92", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2700, "license_type": "no_license", "max_line_length": 362, "num_lines": 59, "path": "/README.md", "repo_name": "vinayprabhu/Burning_Man_2018", "src_encoding": "UTF-8", "text": "# Burning_Man_2018 - Do not deviate from this: https://aiyprojects.withgoogle.com/vision#makers-guide--run-your-app-at-bootup\n# Note to self: The error messages are super-obtuse. Debugging is hell. 
Stick to the script.\n\n## Run your app at bootup\nBy default, your Vision Kit runs the Joy Detector demo when it boots up. This is enabled using a systemd service, which is defined with a .service configuration file at ~/AIY-projects-python/src/examples/vision/joy/joy_detection_demo.service, and it looks like this:\n```\n[Unit]\nDescription=AIY Joy Detection Demo\nRequires=dev-vision_spicomm.device\nAfter=dev-vision_spicomm.device\nWants=aiy-board-info.service\nAfter=aiy-board-info.service\n\n[Service]\nType=simple\nRestart=no\nUser=pi\nEnvironment=AIY_BOARD_NAME=AIY-Board\nEnvironmentFile=-/run/aiy-board-info\nExecStart=/usr/bin/python3 /home/pi/AIY-projects-python/src/examples/vision/joy/joy_detection_demo.py --enable_streaming --mdns_name \"${AIY_BOARD_NAME}\" --blink_on_error\n\n[Install]\nWantedBy=multi-user.target\n```\nThe .service file accepts a long list of configuration options, but this example provides everything you need for most programs you want to run at bootup.\n\nTo create a service like this to start your own app at bootup, just copy this configuration to a new file such as my_program.service (the name must end with .service). Then change ExecStart so it points to your program's Python file (and passes it any necessary parameters), and change Description to describe your program.\n\nThen you need to put this file into the /lib/systemd/system/ directory. But instead of moving this file there, you can keep it with your program files and create a symbolic link (a \"symlink\") in /lib/systemd/system/ that points to the file. For example, let's say your config file is at ~/Programs/my_program.service. Then you can create your symlink as follows:\n\n```\n# Create the symlink\nsudo ln -s ~/Programs/my_program.service /lib/systemd/system\n# Reload the service files so the system knows about this new one\nsudo systemctl daemon-reload\n```\nNow tell the system to run this service on bootup:\n```\nsudo systemctl enable my_program.service\n```\nAll set! You can try rebooting now to see it work.\n\nOr manually run it with this command:\n```\nsudo service my_program start\n```\nIf you want to stop the service from running on bootup, disable it with this command:\n```\nsudo systemctl disable my_program.service\n```\nAnd to manually stop it once it's running, use this command:\n```\nsudo service my_program stop\n```\nYou can check the status of your service with this command:\n```\nsudo service my_program status\n```\nIf you'd like to better understand the service configuration file, see the .service config manual.\n" } ]
2
Cyl3el2Cleal2/ResamplingPython
https://github.com/Cyl3el2Cleal2/ResamplingPython
6c2872416cbec736acb4a50c9231dd0d5e3fd180
2979c4d06d1d25c1de8cabbaf1cd1580aae6db35
6b68a8ee16182cf3678320ff6ac7eec32f22c03e
refs/heads/master
2021-05-21T21:22:40.423908
2020-04-03T15:22:02
2020-04-03T15:22:02
252,806,823
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6023778319358826, "alphanum_fraction": 0.6155878305435181, "avg_line_length": 22.956043243408203, "blob_id": "2f2457c6e14eda76ca619a440a3a7bbfd2441cd3", "content_id": "7ac30662d1be7755d88af01dd3f77b4214033a84", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2271, "license_type": "no_license", "max_line_length": 87, "num_lines": 91, "path": "/2re.py", "repo_name": "Cyl3el2Cleal2/ResamplingPython", "src_encoding": "UTF-8", "text": "from scipy.io import wavfile\r\nimport os.path\r\nfrom os import path\r\nfrom pydub import AudioSegment\r\nimport scipy.signal as sps\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport librosa\r\nimport sys\r\n\r\nargs = sys.argv\r\nargs = args[1:]\r\n\r\n# Your new sampling rate\r\nnew_rate = 3200\r\nif(len(args) == 0):\r\n print(\"Please enter filename\")\r\n quit()\r\nif(not path.exists(args[0])):\r\n print(\"File not found\")\r\n quit()\r\nif(len(args) == 1):\r\n print(\"Downsampling to default: {}\".format(new_rate))\r\nif(len(args) == 2):\r\n try:\r\n float(args[1])\r\n new_rate = int(args[1])\r\n except Exception as err:\r\n print(err)\r\n\r\n\r\n\r\nfilename = args[0]\r\nsrc = filename\r\n\r\ndata = 0\r\nsampling_rate = 0\r\nfileType = src[-4:]\r\n# Read file\r\nif(fileType == '.wav'):\r\n sampling_rate, data = wavfile.read(src)\r\n print(data)\r\n print(type(data))\r\n print(sampling_rate)\r\n plt.figure(figsize=(12,4))\r\n plt.plot(data)\r\n plt.show()\r\nelif(fileType == '.mp3'):\r\n data, sampling_rate = librosa.load(path=src)\r\n # print(data)\r\n # print(type(data))\r\n # print(sampling_rate)\r\n toWav = AudioSegment.from_mp3(src)\r\n data = toWav.get_array_of_samples()\r\n data = np.array(data)\r\n if toWav.channels == 2: data = data.reshape((-1, 2))\r\n sampling_rate = toWav.frame_rate\r\n \r\n print(data)\r\n print(type(data))\r\n print(sampling_rate)\r\n plt.figure(figsize=(12,4))\r\n plt.plot(data)\r\n plt.show()\r\nelse:\r\n print(\"Support only mp3 and wav formats\")\r\n quit()\r\n\r\n# print(\"Original :\\n{}\".format(data))\r\n# Resample data\r\nnumber_of_samples = round(len(data) * float(new_rate) / sampling_rate)\r\n\r\nprint(number_of_samples)\r\nprint(type(data))\r\nprint(data.shape)\r\ntry:\r\n data = sps.resample(data, number_of_samples)\r\n data = np.asarray(data, dtype=np.int16)\r\n # print(\"Processed :\\n{}\".format(data))\r\nexcept:\r\n print(\"Error: Can't process file!\")\r\n quit()\r\n\r\ntry:\r\n # data = np.asarray(data, dtype=np.float)\r\n # librosa.output.write_wav(\"./output/mini_{}.wav\".format(src[:-4]), data, new_rate)\r\n\r\n wavfile.write(\"output/mini_{}\".format(src), new_rate, data)\r\n print(\"File saved to output/mini_{}.wav with Sampling at {}\".format(src, new_rate))\r\nexcept:\r\n print(\"Error: Can't write file!\")\r\n" }, { "alpha_fraction": 0.5466101765632629, "alphanum_fraction": 0.5572034120559692, "avg_line_length": 23.894737243652344, "blob_id": "1a6f568600292fa5844764971fea717f25570cad", "content_id": "8ed21835b2dc1ee725a6816b49f8f531ff47a051", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 472, "license_type": "no_license", "max_line_length": 73, "num_lines": 19, "path": "/ploter.py", "repo_name": "Cyl3el2Cleal2/ResamplingPython", "src_encoding": "UTF-8", "text": "import matplotlib.pyplot as plt\nimport numpy\n\ndef show(name, data):\n ''' \n name = windown title, time = graph spacing, data = read wave file\n '''\n try:\n dat = 
data.astype(numpy.uint16)\n dat.setflags(write=1)\n # Time = numpy.linspace(0, len(data)/fs, num=len(data))\n plt.figure(1)\n plt.title(name)\n # plt.plot(Time, dat)\n plt.plot(data)\n plt.show()\n return True\n except:\n return False" }, { "alpha_fraction": 0.5645893812179565, "alphanum_fraction": 0.572843611240387, "avg_line_length": 29.683544158935547, "blob_id": "36fd7400568e61444bf61f06f27c097227fc2044", "content_id": "f05d2f316ce5d034bd81e1a82a82183e396fd5c9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2423, "license_type": "no_license", "max_line_length": 87, "num_lines": 79, "path": "/reducer.py", "repo_name": "Cyl3el2Cleal2/ResamplingPython", "src_encoding": "UTF-8", "text": "import wave\nimport numpy as np\nimport scipy.signal as sps\nfrom scipy.io import wavfile\nimport ploter as ploter\n\nfile = \"simple.wav\"\nfname = \"reduced_\" + file\n\nclass DownSample():\n def __init__(self):\n self.in_rate = 44100.0\n self.out_rate = 22050.0\n\n def open_file(self, file):\n try:\n self.in_wav = wave.open(file, 'r')\n except:\n print(\"Cannot open wav file (%s)\" % str(file))\n return False\n\n if self.in_wav.getframerate() < self.out_rate:\n print(\"Error: Output rate > Input rate. File size will increase!!\")\n print(self.in_wav.getframerate())\n return False\n\n print(self.in_wav.getframerate())\n self.in_rate = self.in_wav.getframerate()\n self.in_nframes = self.in_wav.getnframes()\n print(\"Frames: %d\" % self.in_wav.getnframes())\n\n if self.in_wav.getsampwidth() == 1:\n self.nptype = np.uint8\n elif self.in_wav.getsampwidth() == 2:\n self.nptype = np.uint16\n \n\n return True\n \n def resample(self, fname):\n self.out_wav = wave.open(fname, \"w\")\n self.out_wav.setframerate(self.out_rate)\n self.out_wav.setnchannels(self.in_wav.getnchannels())\n self.out_wav.setsampwidth(self.in_wav.getsampwidth())\n # self.out_wav.setnframes(1)\n\n # print(\"Nr output chanels: %d\" % self.out_wav.getnchannels())\n\n audio = self.in_wav.readframes(self.in_nframes)\n # audio = self.in_wav\n ploter.show('test', audio)\n nroutsamples = round((len(audio) * self.out_rate / self.in_rate))\n print(\"Nr output samples: %d\" % nroutsamples)\n try:\n # ploter.show(\"Before\", self.in_wav)\n self.in_wav.close()\n # audio_out = sps.resample(np.fromstring(audio, self.nptype), nroutsamples)\n audio_out = sps.resample(audio, nroutsamples)\n # audio_out = audio_out.astype(self.nptype)\n\n # self.out_wav.writeframes(audio_out.copy(order='C'))\n self.out_wav.writeframesraw(audio_out.copy(order='C'))\n\n self.out_wav.close()\n print(\"Saved file\")\n except:\n print(\"Failed to resample\")\n return False\n return True\n\ndef main():\n ds = DownSample()\n if not ds.open_file(file):\n return 1\n ds.resample(fname)\n return 0\n\nif __name__ == '__main__':\n main()" }, { "alpha_fraction": 0.78125, "alphanum_fraction": 0.78125, "avg_line_length": 6.5, "blob_id": "c9d52e971a5c2b62e7d4f716081e0cee3ce46a60", "content_id": "c1b2ad3bda9aec2de6e760b44726b612a8667cdc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 32, "license_type": "no_license", "max_line_length": 10, "num_lines": 4, "path": "/requirement.txt", "repo_name": "Cyl3el2Cleal2/ResamplingPython", "src_encoding": "UTF-8", "text": "scipy \r\nnumpy\r\nmatplotlib\r\npydub" } ]
4
meablanche/coin-packing
https://github.com/meablanche/coin-packing
5090207dcef9b9cb86ef1c2e19e0366d7d34382f
14c94869421861cec5cdbfb858a5a1572e704a63
8da151e194191bc5dd22496f4d296b016bebcb61
refs/heads/master
2020-12-31T04:56:27.741241
2016-05-07T21:23:49
2016-05-07T21:23:49
58,285,793
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5723414421081543, "alphanum_fraction": 0.5969195365905762, "avg_line_length": 29.514999389648438, "blob_id": "9da5caa092e0f3fa0e523c790fa0e65f3fbe8d50", "content_id": "39605fbf3a583d5875deee0b8134caad2bf3df89", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6439, "license_type": "no_license", "max_line_length": 84, "num_lines": 200, "path": "/pennypack.py", "repo_name": "meablanche/coin-packing", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# using R.L. Grahan and N.J.A. Sloane's equations\n\n# COMMENTS {{{1\n# REVIEW/PREFACE {{{2\n# Suppose the pennies have diameter d, let P₁,...,P₂ be their centers and\n# P̄=n⁻¹ΣPᵢ is their centroid. Then the problem is to choose\n# points P₁,...,P₂ so as to satisfy:\n# (1) ‖Pᵢ- Pⱼ‖ ≥ d ; for i,j=1...,n ; i≠j\n#\n# and so that the 'second moment':\n# (2) 𝑈 = 1/d² Σ(i=1 to n)(‖Pᵢ- P̄‖²)\n# centroid P̄ = n⁻¹ΣPᵢ\n# is minimized, where ‖ ‖ is the Euclidean distance.\n# Euclidean distance: ‖q-p‖ = √((q-p)*(q-p)) = √((q₁-p₁)²+(q₂-p₂)²)\n#\n# let 𝑈(n) be the minimal value of 𝑈\n# a set of points 𝒫 ={P₁,...,Pₙ} satisfying (1) is \n# called an n-point packing, and is optimal if it attains 𝑈(n)\n\n# Greedy Algorithm\n# A sequence of packings 𝒫 ₁, 𝒫 ₂, 𝒫 ₃... is produced by the\n# greedy algorithm if:\n# (a) 𝒫 ₁ contains a single point, and\n# (b) for n=2,3,...,\n# 𝒫 ₙ = 𝒫 ₙ₋₁ ∪ {Pₙ}\n# minimizes over 𝑈 all choices of Pₙ satisfying (1)\n#\n# remember: greedy algorithms are where if you're minimizing cost,\n# the greedy algorithm chooses the element with the least cost,\n# and if you're trying to maximize a cost, it picks the element\n# with the greatest cost, per iteration.\n\n# PLAN {{{2\n# quick reference:\n# (1) ‖Pᵢ- Pⱼ‖ ≥ d ; for i,j=1...,n ; i≠j\n# Euclidean distance: ‖q-p‖ = √((q-p)*(q-p)) = √((q₁-p₁)²+(q₂-p₂)²)\n# (2) 𝑈 = 1/d² Σ(i=1 to n)(‖Pᵢ- P̄‖²)\n# centroid P̄ = n⁻¹ΣPᵢ\n#\n# so.... plan of attack...\n# 1) create the first packing, 𝒫 ₁, by choosing some point\n#\n# 2) create the subsequent packings for n=2,3,...\n# so for the next iteration n,\n# we must pick the next point Pₙ such that:\n#\n# i ) Pₙ must satisfy (1) (for all n really)\n# ii) the packing of n, 𝒫 ₙ = 𝒫 ₙ₋₁ ∪ {Pₙ},\n# ie: 𝒫 ₙ is equal to the union of the existing\n# packing, 𝒫 ₙ₋₁, union'd with the new point Pₙ\n# will minimize 𝑈 over all Pₙ already in the set\n# ie: 𝑈 is the sum of all the euclidean distances from\n# each point P to the centroid of the blob P̄,\n# minimizing this means that we are adding a new\n# point Pₙ such that the centroid P̄ does not move\n# very far. iow, addings circles like a flower\n#\n# 3) repeat 2 until the boundary condition is met\n#\n# quick ver of plan\n# must pick new point such that (1) that also minimizes (2)\n\n#}}}1\n\n# CODE {{{1\n\n# HEADERS AND CONSTANTS {{{2\n# import\nimport numpy as np\nfrom scipy.optimize import minimize\nimport timeit\n\n\n# CONSTANTS\nALLOCATEARRAYELEMENTS = 100000\n\n\n\n# VARIABLES {{{2\n# initial guess\nx0 = np.array([5,5])\n# diameter, keep constant\nØ = 5\n# list of points, P₁,...,Pₙ\n# preallocate space, as numpy arrays copy entire array when appending\n#𝒫 = np.zeros(shape=(ALLOCATEARRAYELEMENTS,2))\n𝒫 = np.zeros(shape=(5,2))\n# n, the current point number, starts at 1!!! 
NOT ZERO INDEX!!!\nn = 1\n# the centroid of the packing\nP̄ = np.array([0,0])\n\n# FUNCTIONS {{{2\n# centroid function\nf_P̄ = lambda: 𝒫 [1:n].sum()/n\n\n# optimization is where we try to find the best values for the\n# objective function, since we are trying to find the best Pₙ such\n# that (2) is minimal, (2) is the objective function\n# sometimes also called the cost fuction. The solution that produces\n# a minimum (or maximum) for the objective function is called the optimal solution\n# note: I simplified it algebraically and with vectorizations\n# to greatly save time\ndef 𝑈 (newP):\n print('%-38.38s' % newP, end=''); print(' ', end='')\n global 𝒫 ,P̄ # must use global to modify\n 𝒫 [n] = newP # 'temporarily' add new point P to the list to compute\n # ie: 𝒫 ₙ = 𝒫 ₙ₋₁ ∪ {Pₙ}\n # evaluate new centroid\n P̄=f_P̄()\n print('@: ', end='')\n print(P̄)\n print(𝒫 [1:n])\n return ((𝒫 [1:n]-P̄)**2).sum()/n**2\n#U = lambda p, nn: ((p-P̄)**2).sum()/nn**2\n\n# Euclidean distance function minus diameter, the function (1), check if ≥ 0\n#D = lambda p, q: np.sqrt(((q-p)**2).sum())-Ø\ndef D(newP):\n print(' D', end='')\n print('%-36.36s' % newP, end=''); print(' ', end='')\n # iterate through the list of points 𝒫\n for p in 𝒫 [1:n-1]:\n #print(p)\n # check that the new point is not overlapping with any existing points\n if np.sqrt(((newP-p)**2).sum())-Ø < 0:\n # distance is less than diameter, return failure\n #print(\"NOPE\")\n #print(𝒫 [1:n])\n print('! ', end=''); print(np.sqrt(((newP-p)**2).sum())-Ø)\n return -1\n print('.')\n # no overlaps, return success\n return 0\n\ndef construct_jacobian(func, epsilon):\n def jac(x, *args):\n x0 = np.asfarray(x)\n f0 = np.atleast_1d(func(*((x0,)+args)))\n jac = np.zeros([len(x0),len(f0)])\n dx = np.zeros(len(x0))\n for i in range(len(x0)):\n dx[i] = epsilon\n jac[i] = (func(*((x0+dx,)+args)) - f0)/epsilon\n dx[i] = 0.0\n\n return jac.transpose()\n return jac\n\n# OPTIMIZATION CONSTRAINTS {{{2\n# the constraints\ncons = ({'type': 'eq', 'fun': D})\n# the bounds, new point must stay within the bounding shape\nbnds = ((2, None), (2, None))\n\n\n# MAIN {{{2\n\n\n# evaluate new centroid\nP̄=f_P̄()\n# place first point, P₁\n𝒫 [n] = np.array([0,0])\n\n\nprint(𝒫 [1:n])\nprint()\n\nn += 1\n\nfor limit in range(3):\n print(n, end=''); print(', ', end=''); print(P̄)\n # find new point for maximum packing\n res = minimize(𝑈, x0, method='SLSQP', bounds=bnds, constraints=cons,\n jac=construct_jacobian(𝑈, 1e-4),\n options={'disp': True}) #, 'eps': 1e0})\n #res = minimize(𝑈, x0, method='SLSQP', bounds=bnds, constraints=cons,\n # options={'disp': True}) #, 'eps': 1e0})\n #minimize(𝑈, np.array([1, 1]), method='SLSQP', bounds=bnds, constraints=cons)\n\n # increment n for finding next new point in the next iteration\n print(𝒫 [1:n]); print()\n n += 1\n\nprint(\"\\nEND\")\nprint(𝒫)\nprint()\nprint(res)\n\n\n#}}}1\n\n\n# notes to self...\n# line 175, the prints, they aren't printing what I expect them to?\n# try fix that and see where it takes you, the 𝒫 [1:n] business\n# seem a bit off now\n" } ]
1
novalabsxyz/geckoboard-python
https://github.com/novalabsxyz/geckoboard-python
11963651650e5b09deb4db99bb7189c96f8624c1
14f9e2b2b082b28bea544d45557890cb1a0d9239
0fdd54ee2d2e294bac94078eb4fecf4f372a7806
refs/heads/master
2022-05-02T13:08:17.015528
2017-01-24T19:08:33
2017-01-24T19:08:33
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6194690465927124, "alphanum_fraction": 0.6371681690216064, "avg_line_length": 15.703703880310059, "blob_id": "92c14e37935b709320a6032ec5e64154ee1eee21", "content_id": "b3827d379cf1219fd35a12571a684b8175c58320", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "INI", "length_bytes": 452, "license_type": "no_license", "max_line_length": 55, "num_lines": 27, "path": "/tox.ini", "repo_name": "novalabsxyz/geckoboard-python", "src_encoding": "UTF-8", "text": "[tox]\nenvlist = py27, py35\nskip_missing_interpreters = True\n\n[base]\ndeps =\n pytest\n pytest-cov\n betamax\n betamax_serializers\n betamax_matchers\n flake8\n flake8_docstrings\n flake8_future_import\n\n[testenv]\ncommands =\n py.test --cov geckoboard --cov-report term []\npassenv =\n GECKO_API_KEY\n GECKO_RECORD_MODE\n TRAVIS\ndeps = {[base]deps}\n\n[testenv:lint]\ncommands =\n flake8 {posargs:geckoboard}\n\n" }, { "alpha_fraction": 0.5209923386573792, "alphanum_fraction": 0.5209923386573792, "avg_line_length": 19.153846740722656, "blob_id": "27e08e1887d96f6b864078d2e1abbd9c9a63f8dd", "content_id": "35655546e9e0805b0c00be20d67d659a7d9f6d73", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 524, "license_type": "permissive", "max_line_length": 62, "num_lines": 26, "path": "/geckoboard/__init__.py", "repo_name": "novalabsxyz/geckoboard-python", "src_encoding": "UTF-8", "text": "from __future__ import unicode_literals\n\nfrom .session import Session\nfrom .dataset import Dataset, Field\nfrom .__about__ import (\n __package_name__, __title__, __author__, __author_email__,\n __license__, __copyright__, __version__, __revision__,\n __url__,\n)\n\n\n__all__ = (\n Session,\n Dataset,\n Field,\n # Metadata attributes\n '__package_name__',\n '__title__',\n '__author__',\n '__author_email__',\n '__license__',\n '__copyright__',\n '__version__',\n '__revision__',\n '__url__',\n)\n" }, { "alpha_fraction": 0.4796547591686249, "alphanum_fraction": 0.5252774357795715, "avg_line_length": 26.03333282470703, "blob_id": "ec4b04a4f129abe5ce95691cbfe0d60fd48632d2", "content_id": "605357cb6079d63149223a88c8cf05b460f9dcb7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 811, "license_type": "no_license", "max_line_length": 70, "num_lines": 30, "path": "/tests/test_session.py", "repo_name": "novalabsxyz/geckoboard-python", "src_encoding": "UTF-8", "text": "from __future__ import unicode_literals\n\n\ndef test_push(session):\n # We use a temporary created RAG widget if you need to re-record\n # this:\n #\n # Set GECKO_RECORD_MODE=once and GECKO_API_KEY to a valid api key\n #\n # Remove the tests/casettes/test_session.test_push.json casette\n #\n # Set widget below to the id of a new (legacy) RAG widget\n #\n # Run the test. A new casette will be created. 
You can then safely\n # delete the above RAG widget\n widget = '120885-142a61f0-74a2-0134-3615-22000b5980c2'\n data = {\n 'item': [\n {\n 'value': 20,\n 'text': 'Overdue'\n },\n {},\n {\n 'value': 80,\n 'text': 'Good'\n },\n ]\n }\n session.push(widget, data)\n" }, { "alpha_fraction": 0.6719242930412292, "alphanum_fraction": 0.6728255748748779, "avg_line_length": 30.253520965576172, "blob_id": "d0159a05e02e1edb99472bd77736f407b764d59b", "content_id": "de9c358f6ba52b4b026c67f08465ac90606c5d47", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2219, "license_type": "no_license", "max_line_length": 72, "num_lines": 71, "path": "/tests/conftest.py", "repo_name": "novalabsxyz/geckoboard-python", "src_encoding": "UTF-8", "text": "from __future__ import unicode_literals\n\nimport os\nimport pytest\nfrom betamax import Betamax\nfrom betamax_serializers import pretty_json\nfrom betamax_matchers import json_body\n\nfrom requests.auth import _basic_auth_str\nfrom geckoboard import Session, Dataset, Field as F\n\nBetamax.register_serializer(pretty_json.PrettyJSONSerializer)\nBetamax.register_request_matcher(json_body.JSONBodyMatcher)\nAPI_TOKEN = os.environ.get('GECKO_API_KEY', 'X' * 10)\nRECORD_MODE = os.environ.get('GECKO_RECORD_MODE', 'none')\nRECORD_FOLDER = os.environ.get('GECKO_RECORD_FOLDER', 'tests/cassettes')\n\nwith Betamax.configure() as config:\n config.cassette_library_dir = RECORD_FOLDER\n record_mode = RECORD_MODE\n cassette_options = config.default_cassette_options\n cassette_options['record_mode'] = record_mode\n cassette_options['serialize_with'] = 'prettyjson'\n config.define_cassette_placeholder('<AUTH_TOKEN>',\n _basic_auth_str(API_TOKEN, None))\n config.define_cassette_placeholder('<AUTH_TOKEN>', API_TOKEN)\n\n\[email protected]\ndef recorder(request):\n \"\"\"Generate and start a recorder using a geckoboard.Session.\"\"\"\n cassette_name = ''\n\n if request.module is not None:\n cassette_name += request.module.__name__ + '.'\n\n if request.cls is not None:\n cassette_name += request.cls.__name__ + '.'\n\n cassette_name += request.function.__name__\n\n session = Session(API_TOKEN)\n recorder = Betamax(session)\n\n matchers = ['method', 'uri']\n recorder.use_cassette(cassette_name,\n match_requests_on=matchers)\n recorder.start()\n request.addfinalizer(recorder.stop)\n return recorder\n\n\[email protected]\ndef session(recorder):\n \"\"\"Return the session object used by the current recorder.\"\"\"\n return recorder.session\n\n\[email protected]\ndef tmp_dataset(session):\n fields = {\n 'date': F.date('Date'),\n 'datetime': F.datetime('Date Time'),\n 'number': F.number('Number'),\n 'percentage': F.percentage('Percentage'),\n 'string': F.string('String'),\n 'money': F.money('Dollars', 'USD'),\n }\n dataset = Dataset.create(session, 'test', fields)\n yield dataset\n dataset.delete()\n" }, { "alpha_fraction": 0.5730858445167542, "alphanum_fraction": 0.5742459297180176, "avg_line_length": 29.785715103149414, "blob_id": "2723030bbda0f6ef57acc5a2e1d381e8b0819364", "content_id": "bbf4bcb3e71906a061057fa5ad2dac97174f4a8f", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 862, "license_type": "permissive", "max_line_length": 72, "num_lines": 28, "path": "/geckoboard/session.py", "repo_name": "novalabsxyz/geckoboard-python", "src_encoding": "UTF-8", "text": "from __future__ import unicode_literals\n\nimport requests\n\n\nclass Session(requests.Session):\n\n def 
__init__(self, api_key):\n super(Session, self).__init__()\n self._base_url = 'https://api.geckoboard.com'\n self._push_url = 'https://push.geckoboard.com/v1'\n self._api_key = api_key\n self.auth = requests.auth.HTTPBasicAuth(api_key, None)\n\n def build_url(self, *args, **kwargs):\n parts = [kwargs.get('base_url', self._base_url)]\n parts.extend([part for part in args if part is not None])\n return '/'.join(parts)\n\n def push(self, widget_id, data):\n json = {\n 'api_key': self._api_key,\n 'data': data\n }\n url = self.build_url('send', widget_id, base_url=self._push_url)\n res = self.post(url, json=json)\n res.raise_for_status()\n return True\n" }, { "alpha_fraction": 0.5850903391838074, "alphanum_fraction": 0.6076807379722595, "avg_line_length": 25.039215087890625, "blob_id": "9addfc60579d5ff3fd26d8fe84a8821cf2492467", "content_id": "578dad68bdac703df45fcfda8d8ba459930a7567", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1328, "license_type": "no_license", "max_line_length": 61, "num_lines": 51, "path": "/tests/test_dataset.py", "repo_name": "novalabsxyz/geckoboard-python", "src_encoding": "UTF-8", "text": "from __future__ import unicode_literals\n\nfrom geckoboard import Dataset, Field as F\nfrom datetime import date, datetime, timedelta\n\n\ndef test_create_delete(session):\n fields = {\n 'date': F.date('Date', unique=True),\n 'datetime': F.datetime('Date Time'),\n 'number': F.number('Number'),\n 'percentage': F.percentage('Percentage'),\n 'string': F.string('String'),\n 'money': F.money('Dollars', 'USD'),\n }\n result = Dataset.create(session, 'test', fields)\n assert result.id == 'test'\n assert result.fields == fields\n\n invalid_field = F(\"invalid\", None)\n\n for field in fields.values():\n assert field != invalid_field\n\n assert result.delete() is True\n\n\ndef _tmp_data(start_date, count):\n def _day_date(day_offset):\n return start_date + timedelta(days=day_offset)\n\n return [{\n 'date': _day_date(offset),\n 'datetime': datetime(2016, 9, 23),\n 'number': 22,\n 'percentage': 0.5,\n 'string': \"test string\",\n 'money': 7.95\n } for offset in range(count)]\n\n\ndef test_replace(tmp_dataset):\n data = _tmp_data(date(2016, 9, 23), 5)\n\n assert tmp_dataset.replace(data) is True\n\n\ndef test_append(tmp_dataset):\n data = _tmp_data(date(2016, 9, 23), 5)\n\n assert tmp_dataset.append(data, delete_by='date') is True\n" }, { "alpha_fraction": 0.5472987294197083, "alphanum_fraction": 0.5507153272628784, "avg_line_length": 29.019229888916016, "blob_id": "6030673017c212d9d814cc1ad4cf3c730ee2ec59", "content_id": "c997c072390ea13b4cb0b10fcbf628f9443d2ff6", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4683, "license_type": "permissive", "max_line_length": 76, "num_lines": 156, "path": "/geckoboard/dataset.py", "repo_name": "novalabsxyz/geckoboard-python", "src_encoding": "UTF-8", "text": "from __future__ import unicode_literals\nfrom future.utils import viewitems\nfrom iso4217 import Currency\n\n\nclass Field(object):\n def __init__(self, type, name, currency_code=None, unique=False):\n self.type = type\n self.name = name\n self.currency_code = currency_code\n self.unique = unique\n if currency_code is not None:\n self._money = getattr(Currency, currency_code.lower())\n\n def __eq__(self, other):\n return all([\n self.type == getattr(other, 'type', None),\n self.name == getattr(other, 'name', None),\n self.unique == getattr(other, 'unique', None),\n 
self.currency_code == getattr(other, \"currency_code\", None)\n ])\n\n def __ne__(self, other):\n return not self == other\n\n @staticmethod\n def date(name, unique=False):\n return Field('date', name,\n unique=unique)\n\n @staticmethod\n def datetime(name, unique=False):\n return Field('datetime', name,\n unique=unique)\n\n @staticmethod\n def number(name, unique=False):\n return Field('number', name,\n unique=unique)\n\n @staticmethod\n def percentage(name, unique=False):\n return Field('percentage', name,\n unique=unique)\n\n @staticmethod\n def string(name, unique=False):\n return Field('string', name,\n unique=unique)\n\n @staticmethod\n def money(name, currency_code, unique=False):\n return Field('money', name,\n currency_code=currency_code,\n unique=unique)\n\n @classmethod\n def from_schema(cls, json, unique=False):\n type = json.get('type')\n name = json.get('name')\n currency_code = json.get('currency_code', None)\n return cls(type, name,\n currency_code=currency_code,\n unique=unique)\n\n def to_schema(self):\n value = {\n 'type': self.type,\n 'name': self.name\n }\n if self.type == 'money':\n value['currency_code'] = self.currency_code\n return value\n\n def to_json(self, value):\n type = self.type\n if type == 'date':\n value = value.isoformat()\n elif type == 'datetime':\n value = value.isoformat() + 'Z'\n elif type == 'money':\n value = value * 10**self._money.exponent\n\n return value\n\n\nclass Dataset(object):\n\n def __init__(self, session, id, fields):\n self._session = session\n self.id = id\n self.fields = fields\n\n @classmethod\n def from_json(cls, session, json):\n id = json.get('id')\n fields = json.get('fields')\n unique_fields = frozenset(json.get('unique_by', []))\n\n def _build_field(name, value):\n unique = name in unique_fields\n return Field.from_schema(value, unique=unique)\n\n fields = {f[0]: _build_field(f[0], f[1]) for f in viewitems(fields)}\n return cls(session, id, fields)\n\n @classmethod\n def create(cls, session, id, fields):\n json = {\n 'fields': {f[0]: f[1].to_schema() for f in viewitems(fields)}\n }\n unique_by = [f[0] for f in viewitems(fields) if f[1].unique]\n if unique_by:\n json['unique_by'] = unique_by\n url = session.build_url('datasets', id)\n response = session.put(url, json=json)\n response.raise_for_status()\n return cls.from_json(session, response.json())\n\n def _build_data_json(self, data):\n def _fields_json(entry):\n fields = self.fields\n return {f[0]: fields[f[0]].to_json(f[1])\n for f in viewitems(entry)}\n\n json = {\n 'data': [_fields_json(entry) for entry in data]\n }\n return json\n\n def replace(self, data):\n session = self._session\n url = session.build_url('datasets', self.id, 'data')\n json = self._build_data_json(data)\n\n response = session.put(url, json=json)\n response.raise_for_status()\n return True\n\n def append(self, data, delete_by=None):\n session = self._session\n url = session.build_url('datasets', self.id, 'data')\n json = self._build_data_json(data)\n if delete_by is not None:\n json['delete_by'] = delete_by\n\n response = session.post(url, json=json)\n response.raise_for_status()\n return True\n\n def delete(self):\n session = self._session\n url = session.build_url('datasets', self.id)\n response = self._session.delete(url)\n response.raise_for_status()\n return True\n" }, { "alpha_fraction": 0.7574981451034546, "alphanum_fraction": 0.7593269944190979, "avg_line_length": 28.717391967773438, "blob_id": "d4455527c1027032002d56b54ca71f469c8bc716", "content_id": 
"a3f234f7e891e0f4c8582d605c2c9fbfa01b7534", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2734, "license_type": "no_license", "max_line_length": 172, "num_lines": 92, "path": "/README.md", "repo_name": "novalabsxyz/geckoboard-python", "src_encoding": "UTF-8", "text": "# gecko-dataset #\n\n\n[![Build Status](https://travis-ci.org/helium/geckoboard-python.svg?branch=master)](https://travis-ci.org/helium/geckoboard-python)\n[![Coverage Status](https://coveralls.io/repos/github/helium/geckoboard-python/badge.svg?branch=master)](https://coveralls.io/github/helium/geckoboard-python?branch=master)\n[![Code Climate](https://codeclimate.com/github/helium/geckoboard-python/badges/gpa.svg)](https://codeclimate.com/github/helium/geckoboard-python)\n\n`geckoboard-python` is a Python package that makes creating and managing\n[datasets](https://developer.geckoboard.com/api-reference/curl/) for\nthe [Geckboard](https://geckoboard.com) service easier.\n\n\n## Installation\n\nAssuming you're using `pip`, add this line to your `setup.py`:\n\n```\nrequires = ['geckoboard-python]\n```\n\nor to your `requirements.txt` file:\n\n```\ngeckoboard-python\n```\n\n## Usage and Documentation\n\n\n\n\n## Development\n\n\nIn order to develop for this code-base you will need to install tox:\n\n```\npip install tox\n```\n\nSince `geckoboard-python` supports at least Python 2.7 and 3.5 you will\nneed to have at least one of those installed on your development\nmachine. The build automation will test all required versions when\ncode is pushed to this repository.\n\nClone this repository and run:\n\n```\n$ tox\n```\n\nThis will install the required packages and run the tests for the\nlibrary. Installing `tox` removes the need to install or use\n`virtualenv` since tox manages virtual environments already.\n\nBy default the tests will replay back previously recorded API\ninteractions. To affect how tests interact with the API you\nwill have to\n\n* Set a `GECKO_API_KEY` environment variable to a valid Geckboard API\n key. For example in `bash`:\n\n\n```\n$ export GECKO_API_KEY=<my api key>\n```\n\n* Set `GECKO_RECORD_MODE` to one of:\n\n* **none** - (default) Only play back recorded API interactions.\n\n* **once** - Only record interactions for which no recording exist. If\n you get an error message from betamax complaining about a recording\n not matching an interaction that means that your test has new API\n interactions with it. Remove the cassette referred to in the error\n message and run the test again to re-generate it .\n\nWe use `flake8` to ensure we catch Python version differences and\ncommon pitfalls quicker. Please run:\n\n```\n$ tox -e lint\n```\n\nbefore you commit code and try to remove as many warnings as you\ncan. Once we figure out how strict some of the documentation\nrequirements need to be we will be running lint as part of automation.\n\nTo make a release tag the repository with a valid\n[semantic version](https://semver.org) version and push tags. The\nbuild automation will automatically build and push releases to\n[PyPi](https://pypi.python.org).\n" } ]
8
manalco/presentations
https://github.com/manalco/presentations
db482c7a20ed58a14a04870244053cd2f9cde9e5
717e6263149dc5f9b9e64c0897488143c34e9b22
fc36eb2f8a74dc0da2db20253b6c4654876d9f3e
refs/heads/master
2022-04-01T21:39:14.450924
2020-02-08T16:30:01
2020-02-08T16:30:01
220,119,274
1
1
null
null
null
null
null
[ { "alpha_fraction": 0.5159574747085571, "alphanum_fraction": 0.5531914830207825, "avg_line_length": 24.68181800842285, "blob_id": "9ae1e94fc348dd7b81edd2351d30619b4c390352", "content_id": "70593f8687be64b27a54cfab04e4b3a6cc8b411c", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 564, "license_type": "permissive", "max_line_length": 74, "num_lines": 22, "path": "/steganography/png/decode.py", "repo_name": "manalco/presentations", "src_encoding": "UTF-8", "text": "import sys\nimport cv2\n\nimage = cv2.imread(sys.argv[1])\nbinary_data = \"\"\nfor row in image:\n for pixel in row:\n r = format(pixel[0], \"08b\")\n g = format(pixel[1], \"08b\")\n b = format(pixel[2], \"08b\")\n binary_data += r[-1]\n binary_data += g[-1]\n binary_data += b[-1]\n\nall_bytes = [ binary_data[i: i+8] for i in range(0, len(binary_data), 8) ]\ndecoded_data = \"\"\nfor byte in all_bytes:\n decoded_data += chr(int(byte, 2))\n if decoded_data[-5:] == \"-msg-\":\n break\nprint(\">>> Decoded data:\")\nprint(decoded_data[:-5])" }, { "alpha_fraction": 0.5319567322731018, "alphanum_fraction": 0.5653883814811707, "avg_line_length": 27.25, "blob_id": "26460843e4b132dcef63e46bf5b64c930f9abd4e", "content_id": "00ca7eb76bf65422e040127e4238ed9f19fe8629", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1017, "license_type": "permissive", "max_line_length": 89, "num_lines": 36, "path": "/steganography/png/encode.py", "repo_name": "manalco/presentations", "src_encoding": "UTF-8", "text": "\n# pip3 install opencv-python\n\nimport sys\nimport cv2\n\nclear_text = sys.argv[2]+\"-msg-\"\nmessage = ''.join([ format(ord(i), \"08b\") for i in clear_text ])\n\nimage = cv2.imread(sys.argv[1])\n\ndata_index = 0\ndata_len = len(message)\nmax_bytes = image.shape[0] * image.shape[1] * 3 // 8\nif len(clear_text) > max_bytes:\n\traise ValueError(\">>> ERROR: The selected image is not big enough to hold the message.\")\n\nfor row in image:\n for pixel in row:\n r = format(pixel[0], \"08b\")\n g = format(pixel[1], \"08b\")\n b = format(pixel[2], \"08b\")\n \n if data_index < data_len:\n \tpixel[0] = int(r[:-1] + message[data_index], 2)\n \tdata_index += 1\n if data_index < data_len:\n \tpixel[1] = int(g[:-1] + message[data_index], 2)\n \tdata_index += 1\n \tif data_index < data_len:\n \t\tpixel[2] = int(b[:-1] + message[data_index], 2)\n \t\tdata_index += 1\n \tif data_index >= data_len:\n \t\tbreak\n\ncv2.imwrite(\"output.PNG\", image)\nprint(\">>> Image encoded.\")" }, { "alpha_fraction": 0.6268343925476074, "alphanum_fraction": 0.6436058878898621, "avg_line_length": 18.100000381469727, "blob_id": "dbcf78a397fce0e19b481d7bdb890da73ea1b112", "content_id": "3dbfe7a4493d7337f0ae76ac0672562ffc1c2691", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 956, "license_type": "permissive", "max_line_length": 94, "num_lines": 50, "path": "/steganography/midi/decode.py", "repo_name": "manalco/presentations", "src_encoding": "UTF-8", "text": "#! 
-*- coding: utf8 -*-\n\n#Última Edición: 26-feb-2017\n#Autor: Manuel Alejandro Alvarado Cobo\n#Email: [email protected]\n\nimport sys\nimport midi\n\narchivo = midi.read_midifile(sys.argv[1])\nmensaje = \"\"\ncanales = {}\ncanal = -1\n\n#Buscar canales usados\nfor i in archivo:\n\tfor j in i:\n\t\ttry:\n\t\t\tj.channel\n\t\texcept:\n\t\t\tpass\n\t\telse:\n\t\t\tif canales.has_key(j.channel):\n\t\t\t\tcanales[j.channel] += 1\n\t\t\telse:\n\t\t\t\tcanales[j.channel] = 1\n\nfor c in canales:\n\tinicializador = \"\"\n\tfor pista in archivo:\n\t\tfor evento in pista:\n\t\t\tif isinstance(evento, midi.NoteOnEvent) and evento.channel == c and len(inicializador) < 5:\n\t\t\t\tinicializador += chr(evento.data[0])\n\t\t\telse:\n\t\t\t\tif inicializador == \"-msj-\":\n\t\t\t\t\tcanal = c\n\t\t\t\t\tbreak\n\t\tif canal >= 0:\n\t\t\tbreak\n\tif canal >= 0:\n\t\tbreak\n\nfor pista in archivo:\n\tfor evento in pista:\n\t\tif isinstance(evento, midi.NoteOnEvent) and evento.channel == canal:\n\t\t\tmensaje += chr(evento.data[0])\n\nmensaje = mensaje.replace('-msj-', '')\n\nprint \"el mensaje es: '\"+mensaje+\"'\"" }, { "alpha_fraction": 0.6787003874778748, "alphanum_fraction": 0.7364621162414551, "avg_line_length": 54.400001525878906, "blob_id": "184178ce1949c6301b1eba95603517bf48d5bacb", "content_id": "db5fb1e6536593151b0cb2d404abd3ca6b1183c3", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 280, "license_type": "permissive", "max_line_length": 91, "num_lines": 5, "path": "/README.md", "repo_name": "manalco/presentations", "src_encoding": "UTF-8", "text": "# Presentations\n\n* [Feb 8, 2020] Pycon Colombia, Medellín. [Hiding data within data](steganography).\n* [Nov 6, 2019] Unicatolica, Cali. [Steganography, hiding data within data](steganography).\n* [May 10, 2016] Univalle, Tuluá. [Git, introducción al control de versiones](git).\n" }, { "alpha_fraction": 0.6626809239387512, "alphanum_fraction": 0.6733794808387756, "avg_line_length": 23.461538314819336, "blob_id": "4534fdd02befd6fbb611cf0a0be891b3e61afa29", "content_id": "d7e4a0109edc52f04f330debb1705923cb2e5d60", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1607, "license_type": "permissive", "max_line_length": 95, "num_lines": 65, "path": "/steganography/midi/encode.py", "repo_name": "manalco/presentations", "src_encoding": "UTF-8", "text": "#! 
-*- coding: utf8 -*-\n\n#Última Edición: 26-feb-2017\n#Autor: Manuel Alejandro Alvarado Cobo\n#Email: [email protected]\n\nimport sys\nimport midi\n\nnombre_archivo = sys.argv[1]\narchivo = midi.read_midifile(nombre_archivo)\npista = midi.Track()\nmensaje = sys.argv[2]\ncanales = {}\ncanal = 0\n\n#Buscar un canal libre\nfor i in archivo:\n\tfor j in i:\n\t\ttry:\n\t\t\tj.channel\n\t\texcept:\n\t\t\tpass\n\t\telse:\n\t\t\tif canales.has_key(j.channel):\n\t\t\t\tcanales[j.channel] += 1\n\t\t\telse:\n\t\t\t\tcanales[j.channel] = 1\n\nfor c in range(0,15):\n\tif not canales.has_key(c):\n\t\tcanal = c\n\t\tbreak\n\n#sanitanizar el mensaje\nmensaje = mensaje.upper()\nmensaje = mensaje.replace('ñ', 'N')\nmensaje = mensaje.replace('á', 'A')\nmensaje = mensaje.replace('à', 'A')\nmensaje = mensaje.replace('ä', 'A')\nmensaje = mensaje.replace('é', 'E')\nmensaje = mensaje.replace('è', 'E')\nmensaje = mensaje.replace('ë', 'E')\nmensaje = mensaje.replace('í', 'I')\nmensaje = mensaje.replace('ì', 'I')\nmensaje = mensaje.replace('ï', 'I')\nmensaje = mensaje.replace('ó', 'O')\nmensaje = mensaje.replace('ò', 'O')\nmensaje = mensaje.replace('ö', 'O')\nmensaje = mensaje.replace('ú', 'U')\nmensaje = mensaje.replace('ù', 'U')\nmensaje = mensaje.replace('ü', 'U')\nmensaje = \"-msj-\"+mensaje\n\n#codificar el mensaje en el canal elegido\nfor letra in list(mensaje):\n\tnota = midi.NoteOnEvent(tick=0, channel=canal, data=[(ord(letra)), 0])\n\tpista.append(nota)\n\n#escribir un archivo nuevo\narchivo.append(pista)\nnombre_archivo = nombre_archivo.replace('.mid', ' copia.mid')\nmidi.write_midifile(nombre_archivo, archivo)\n\nprint \"El mensaje ha sido codificado en el archivo: \"+nombre_archivo+\" en el canal \"+str(canal)" } ]
5
ryangdar/Ab1755-
https://github.com/ryangdar/Ab1755-
f3d7894b9e6014dc166dd4a3db29d87a318c2d09
4377cd6e6de00c63eeb5854fa270920114406c11
f791024fb57372f1387a7e4c96e37ba6065eec7b
refs/heads/master
2022-12-19T10:58:03.466922
2020-09-05T04:28:29
2020-09-05T04:28:29
285,922,215
1
0
Apache-2.0
2020-08-07T21:16:13
2020-09-05T04:28:36
2020-09-21T21:47:06
HTML
[ { "alpha_fraction": 0.5155062079429626, "alphanum_fraction": 0.7023411393165588, "avg_line_length": 16.63538932800293, "blob_id": "d201c818fd5d2b6a6023fc9dc2f520069a6071d3", "content_id": "f2d1c827420b94658e4ee547b26afb8e9c8e8421", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 6578, "license_type": "permissive", "max_line_length": 41, "num_lines": 373, "path": "/requirements.txt", "repo_name": "ryangdar/Ab1755-", "src_encoding": "UTF-8", "text": "absl-py==0.9.0\naffine==2.3.0\nalabaster==0.7.12\nanaconda-client==1.7.2\nanaconda-navigator==1.9.7\nanaconda-project==0.8.3\nantlr4-python3-runtime==4.8\nappdirs==1.4.4\nappnope==0.1.0\nappscript==1.0.1\nasgiref==3.2.3\nasn1crypto==1.0.1\nastor==0.8.1\nastroid==2.3.1\nastroML==0.4.1\nastropy==3.2.2\natomicwrites==1.3.0\nattrs==19.2.0\nawsebcli==3.18.2\nBabel==2.7.0\nbackcall==0.1.0\nbackports.functools-lru-cache==1.6.1\nbackports.os==0.1.1\nbackports.shutil-get-terminal-size==1.0.0\nbackports.tempfile==1.0\nbackports.weakref==1.0.post1\nbcrypt==3.1.7\nbeautifulsoup4==4.8.0\nbibtexparser==1.1.0\nbiopython==1.77\nbitarray==1.0.1\nbkcharts==0.2\nbleach==3.1.0\nblessed==1.17.9\nbokeh==1.3.4\nboto==2.49.0\nboto3==1.13.21\nbotocore==1.15.49\nBottleneck==1.2.1\nCacheControl==0.12.6\ncached-property==1.5.1\ncachetools==4.1.0\ncement==2.8.2\ncertifi==2020.6.20\ncffi==1.12.3\nchaospy==3.2.10\nchardet==3.0.4\nchart-studio==1.1.0\nchroma-py==0.1.0.dev1\nClick==7.0\nclick-plugins==1.1.1\ncligj==0.5.0\ncloudpickle==1.2.2\nclyent==1.2.2\ncmake==3.17.1\ncolorama==0.4.1\ncolour==0.1.5\ncombo==0.1.0\nconda==4.8.3\nconda-build==3.18.9\nconda-package-handling==1.7.0\nconda-verify==3.4.2\ncontextlib2==0.6.0\ncryptography==2.7\ncvxpy==1.1.1\ncycler==0.10.0\nCython==0.29.13\ncytoolz==0.10.0\ndash==1.13.4\ndash-bio==0.4.8\ndash-core-components==1.10.1\ndash-daq==0.2.1\ndash-html-components==1.0.3\ndash-renderer==1.5.1\ndash-table==4.8.1\ndash.ly==0.17.3\ndask==2.5.2\ndataclasses==0.6\ndeap==1.3.1\ndecorator==4.4.0\ndefusedxml==0.6.0\ndill==0.3.1.1\ndistlib==0.3.1\ndistributed==2.5.2\nDjango==3.0.2\ndjango-htmlmin==0.11.0\ndocker==4.2.2\ndocker-compose==1.25.5\ndocker-py==1.10.6\ndocker-pycreds==0.4.0\ndockerpty==0.4.1\ndocopt==0.6.2\ndocutils==0.15.2\nearthpy==0.9.2\necos==2.0.7.post1\nentrypoints==0.3\nEquation==1.2.1\net-xmlfile==1.0.1\nexdir==0.4.1\nExtensionClass==4.4\nfastcache==1.1.0\nfilelock==3.0.12\nFiona==1.8.13.post1\nFlask==1.1.2\nFlask-Compress==1.4.0\nFlask-SeaSurf==0.2.2\nfsspec==0.5.2\nfuture==0.16.0\ngast==0.2.2\ngeojson==2.5.0\ngeopandas==0.7.0\ngevent==1.4.0\nglob2==0.7\ngmpy2==2.0.8\ngoogle-auth==1.14.1\ngoogle-auth-oauthlib==0.4.1\ngoogle-pasta==0.2.0\ngpflow==2.0.1\nGPy==1.9.9\ngreenlet==0.4.15\ngrpcio==1.28.1\ngunicorn==20.0.4\nh5py==2.9.0\nhdmedians==0.13\nHeapDict==1.0.1\nholoviews==1.12.7\nhtml5lib==1.0.1\nhtmlmin==0.1.12\nidna==2.8\nimageio==2.6.0\nimagesize==1.1.0\nimportlib-metadata==0.23\ninflection==0.5.0\nipykernel==5.1.2\nipypublish==0.10.10\nipython==7.8.0\nipython-genutils==0.2.0\nipywidgets==7.5.1\nisort==4.3.21\nitsdangerous==1.1.0\njdcal==1.4.1\njedi==0.15.1\nJinja2==2.10.3\njmespath==0.10.0\njoblib==0.13.2\njson5==0.8.5\njsonextended==0.7.11\njsonschema==3.0.2\njupyter==1.0.0\njupyter-client==5.3.3\njupyter-console==6.0.0\njupyter-contrib-core==0.3.3\njupyter-contrib-nbextensions==0.5.1\njupyter-core==4.5.0\njupyter-highlight-selected-word==0.2.0\njupyter-latex-envs==1.4.6\njupyter-nbextensions-configurator==0.4.1\njupyterlab==1.1.4\njupyterlab-server==1.0.6\n
jupytext==1.3.1\nKeras-Applications==1.0.8\nKeras-Preprocessing==1.1.0\nkeyring==18.0.0\nkiwisolver==1.1.0\nlazy-object-proxy==1.4.2\nlibarchive-c==2.8\nlief==0.9.0\nllvmlite==0.32.1\nlocket==0.2.0\nlockfile==0.12.2\nlxml==4.4.1\nMako==1.1.3\nmapboxgl==0.10.2\nMarkdown==3.1.1\nMarkupSafe==1.1.1\nmatplotlib==3.2.1\nmccabe==0.6.1\nmeshio==4.0.16\nmistune==0.8.4\nmkl-fft==1.0.14\nmkl-random==1.1.0\nmkl-service==2.3.0\nmlfinlab==0.12.3\nmock==3.0.5\nmod-wsgi==4.7.1\nmore-itertools==7.2.0\nmpld3==0.3\nmpmath==1.1.0\nmsgpack==0.6.1\nmultipledispatch==0.6.0\nmultiprocess==0.70.9\nmunch==2.5.0\nnatsort==7.0.1\nnavigator-updater==0.2.1\nnbconvert==5.6.0\nnbformat==4.4.0\nnetworkx==2.3\nnltk==3.4.5\nnose==1.3.7\nnotebook==6.0.1\nnumba==0.49.1\nnumexpr==2.7.0\nnumpoly==0.1.16\nnumpy==1.18.5\nnumpy-financial==1.0.0\nnumpydoc==0.9.1\noauthlib==3.1.0\nolefile==0.46\nopen3d==0.10.0.0\nopencv-python==4.2.0.34\nopenpyxl==3.0.0\nopt-einsum==3.2.1\nordered-set==3.1.1\nosqp==0.6.1\npackaging==19.2\npandas==1.0.4\npandas-datareader==0.8.1\npandocfilters==1.4.2\npanflute==1.12.4\nparam==1.9.2\nparamiko==2.7.1\nparamz==0.9.5\nparso==0.5.1\npartd==1.0.0\npath.py==12.0.1\npathlib2==2.3.5\npathspec==0.5.9\npatsy==0.5.1\npep8==1.7.1\npexpect==4.7.0\npickleshare==0.7.5\nPillow==6.2.0\npkginfo==1.5.0.1\nplotly==4.6.0\nplotly-express==0.4.1\nplotlyhtmlexporter==0.0.2\npluggy==0.13.0\nply==3.11\nprometheus-client==0.7.1\nprompt-toolkit==2.0.10\nprotobuf==3.11.3\npsutil==5.6.3\nptyprocess==0.6.0\nPweave==0.30.3\npy==1.8.0\npyasn1==0.4.8\npyasn1-modules==0.2.8\npycode==0.1.2\npycodestyle==2.5.0\npyconsole==0.5\npycosat==0.6.3\npycparser==2.19\npycrypto==2.6.1\npycurl==7.43.0.3\npyerf==1.0.1\npyflakes==2.1.1\npygad==2.4.0\nPygments==2.4.2\npygpu==0.7.6\nPyLaTeX==1.3.1\npylint==2.4.2\nPyNaCl==1.4.0\npyod==0.7.8.1\npyodbc==4.0.27\npyOpenSSL==19.0.0\npyparsing==2.4.2\npyproj==2.6.1.post1\npyrsistent==0.15.4\nPySocks==1.7.1\npyswarms==1.1.0\npytest==5.2.1\npytest-arraydiff==0.3\npytest-astropy==0.5.0\npytest-doctestplus==0.4.0\npytest-openfiles==0.4.0\npytest-remotedata==0.3.2\npython-dateutil==2.8.0\npytz==2019.3\npyvista==0.25.3\npyviz-comms==0.7.2\nPyWavelets==1.0.3\nPyX==0.15\nPyYAML==5.3.1\npyzmq==18.1.0\nQtAwesome==0.6.0\nqtconsole==4.5.5\nQtPy==1.9.0\nQuandl==3.5.2\nrasterio==1.1.5\nRecord==3.5\nrequests==2.22.0\nrequests-oauthlib==1.3.0\nretrying==1.3.3\nrope==0.14.0\nrsa==4.0\nruamel-yaml==0.15.46\nruamel.yaml.clib==0.2.0\ns3transfer==0.3.3\nSALib==1.3.11\nscikit-bio==0.5.6\nscikit-image==0.16.2\nscikit-learn==0.23.1\nscikits.bootstrap==1.0.1\nscipy==1.4.1\nscooby==0.5.6\nscs==2.1.2\nseaborn==0.9.0\nsemantic-version==2.5.0\nSend2Trash==1.5.0\nShapely==1.7.0\nsimplegeneric==0.8.1\nsimpy==3.0.11\nsingledispatch==3.4.0.3\nsix==1.12.0\nsnowballstemmer==2.0.0\nsnuggs==1.4.7\nsortedcollections==1.1.2\nsortedcontainers==2.1.0\nsoupsieve==1.9.3\nSphinx==2.2.0\nsphinxcontrib-applehelp==1.0.1\nsphinxcontrib-devhelp==1.0.1\nsphinxcontrib-htmlhelp==1.0.2\nsphinxcontrib-jsmath==1.0.1\nsphinxcontrib-qthelp==1.0.2\nsphinxcontrib-serializinghtml==1.1.3\nsphinxcontrib-websupport==1.1.2\nspyder==3.3.6\nspyder-kernels==0.5.2\nSQLAlchemy==1.3.9\nsqlparse==0.3.0\nstatsmodels==0.11.1\nsty==1.0.0b12\nsuod==0.0.3\nsympy==1.4\ntables==3.5.2\ntabulate==0.8.7\ntblib==1.4.0\ntensorboard==2.1.1\ntensorflow==2.1.0\ntensorflow-estimator==2.1.0\ntensorflow-probability==0.9.0\ntermcolor==1.1.0\nterminado==0.8.2\ntestpath==0.4.2\ntexttable==1.6.2\nTheano==1.0.4\nthreadpoolctl==2.1.0\ntoolz==0.10.0\ntornado==6.0.3\ntqdm==4.36.1\ntraitlets==4.3.3\nuncert
ainpy==1.2.1\nunicodecsv==0.14.1\nurllib3==1.24.3\nvirtualenv==20.0.30\nvtk==9.0.1\nwcwidth==0.1.7\nwebencodings==0.5.1\nwebsocket-client==0.57.0\nWerkzeug==0.16.0\nwidgetsnbextension==3.5.1\nwrapt==1.11.2\nwurlitzer==1.0.3\nxlrd==1.2.0\nXlsxWriter==1.2.1\nxlwings==0.15.10\nxlwt==1.3.0\nxvfbwrapper==0.2.9\nxxx==0.0.1\nyellowbrick==1.0.1\nzict==1.0.0\nzipp==0.6.0\n" }, { "alpha_fraction": 0.6399999856948853, "alphanum_fraction": 0.6909090876579285, "avg_line_length": 12.095237731933594, "blob_id": "f0234a0977f6061055a752815797a29ca4b46dbb", "content_id": "398c54ae2e36dfe545dfa7cbbc5e5b0ddf9f1f2a", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Dockerfile", "length_bytes": 275, "license_type": "permissive", "max_line_length": 63, "num_lines": 21, "path": "/Dockerfile", "repo_name": "ryangdar/Ab1755-", "src_encoding": "UTF-8", "text": "# 1 \nFROM python:3.7\n\n# 2\nRUN pip install Flask gunicorn\n\n# 3\nCOPY src/ /app\nWORKDIR /app\n\n# 4\nENV PORT 8080\n\n# 5\nCMD exec gunicorn --bind :$PORT --workers 1 --threads 8 app:app\n\n#6\n\nFROM gcr.io/cloud-builders/gcloud\nCOPY notice.sh /usr/bin\nENTRYPOINT [\"/usr/bin/notice.sh\"]\n" }, { "alpha_fraction": 0.6705819368362427, "alphanum_fraction": 0.6914041638374329, "avg_line_length": 38, "blob_id": "2322c3b0098903478534e8de38640a89cf4a4035", "content_id": "7a613555971b396bd6c9e6827b1342c7fa1b6d45", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1873, "license_type": "permissive", "max_line_length": 165, "num_lines": 48, "path": "/basic_example.py", "repo_name": "ryangdar/Ab1755-", "src_encoding": "UTF-8", "text": "__author__ = 'RD'\n\nimport flask\nimport pandas as pd\nfrom flask import Flask, render_template, request\nfrom searchData import searchData\n# from flask_app import app as application\n\nhtml = \"\"\"\n<! 
DOCTYPE html>\n<html lang = \"en\">\n<script src=\"https://ajax.googleapis.com/ajax/libs/jquery/3.5.1/jquery.min.js\"></script>\n<link rel=\"stylesheet\" type=\"text/css\" href=\"https://cdn.datatables.net/1.10.21/css/jquery.dataTables.min.css\">\n<link rel=\"stylesheet\" type=\"text/css\" href=\"https://cdnjs.cloudflare.com/ajax/libs/semantic-ui/2.3.1/semantic.min.css\">\n<link rel=\"stylesheet\" type=\"text/css\" href=\"https://cdn.datatables.net/1.10.21/css/dataTables.semanticui.min.css\">\n<script type=\"text/javascript\" charset=\"utf8\" src=\"https://cdn.datatables.net/1.10.21/js/jquery.dataTables.js\"></script>\n<script type=\"text/javascript\" charset=\"utf8\" src=\"https://cdn.datatables.net/1.10.21/js/jquery.dataTables.min.js\"></script>\n<script type=\"text/javascript\" charset=\"utf8\" src=\"https://cdn.datatables.net/1.10.21/js/dataTables.semanticui.min.js\"></script>\n<script type=\"text/javascript\" charset=\"utf8\" src=\"https://cdnjs.cloudflare.com/ajax/libs/semantic-ui/2.3.1/semantic.min.js\"></script>\n<script>\n$(document).ready( function () {\n $('#my_id').DataTable();\n} );\n</script>\n<head>\n <meta charset = \"UTF-8\">\n <title>California's Open Water and Ecological Data Results</title>\n</head>\n<body>\n{{table | safe}}\n</body>\n</html>\n\"\"\"\napp = Flask(__name__)\n@ app.route ('/')\n\[email protected]('/')\ndef index():\n return render_template('about.html')\n\[email protected]('/hello', methods=['POST'])\ndef hello():\n first_name = request.form['first_name']\n output = searchData(first_name)\n return flask.render_template_string (html, table = output.to_html (header = 'true', classes =['ui celled table','hover'], table_id = 'my_id', render_links=True))\n\nif __name__ == '__main__':\n app.run()\n\n" }, { "alpha_fraction": 0.6804780960083008, "alphanum_fraction": 0.6912350654602051, "avg_line_length": 36.46268844604492, "blob_id": "341d4db8fc627af97f4e931ee5823ebad1920fd2", "content_id": "9af90ed4c1230b9b614d250ba34075a95302737d", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2510, "license_type": "permissive", "max_line_length": 101, "num_lines": 67, "path": "/searchData.py", "repo_name": "ryangdar/Ab1755-", "src_encoding": "UTF-8", "text": "# Use 'from searchData import searchData' in main()\n# Call by inputing a string\n# Can change number of 'rows' returned and result offset 'start'\ndef searchData(searchTerm,rows=1000,start=0):\n\n\t# import json\n\timport requests\n\timport pandas as pd\n\timport numpy as np\n\timport re\n\n\t# import time\n\t# begin = time.time()\n\n\t# from ckanapi import RemoteCKAN\n\t# RemoteCKAN('https://data.cnra.ca.gov/')\n\t# test = demo.action.package_search(q = \"spending water\")\n\t# ckan.logic.action.get.package_search() <-- Need to use this?\n\n\t# Websites we search.\n\tcnraURL = \"https://data.cnra.ca.gov/\"\n\tcaOpenDataURL = \"https://data.ca.gov/\"\n\t# Package search: this is what the default search is on both of the above sites.\n\tpkgSearch = \"api/3/action/package_search?q=\"\n\tsearchTerm = searchTerm.replace(\" \", \"%20\")\n\treturnSize = \"&rows=\" + str(rows) + \"&start=\" + str(start)\n\n\t# Make request from API\n\trCNRA = requests.get(cnraURL + pkgSearch + searchTerm + returnSize)\n\trCAPortal = requests.get(caOpenDataURL + pkgSearch + searchTerm+ returnSize)\n\n\tif(rCNRA.status_code !=200 or rCAPortal.status_code !=200):\n\t\tprint(\"There was an error connecting to the sites.\")\n\t\treturn\n\n\tif(rCNRA.json()['result']['count'] + 
rCAPortal.json()['result']['count'] == 0):\n\t\tprint(\"No datasets found for: \" + searchTerm)\n\t\tprint(\"Please try another search.\")\n\t\treturn\n\n\tCNRAdf = pd.DataFrame.from_records(rCNRA.json()['result']['results'])\n\tCNRAdf = CNRAdf.loc[:,['title','name','notes','metadata_created']]\n\tCNRAdf['name'] = cnraURL + \"dataset/\" + CNRAdf['name']\n\n\tCAPortaldf = pd.DataFrame.from_records(rCAPortal.json()['result']['results'])\n\tCAPortaldf = CAPortaldf.loc[:,['title','name','notes','metadata_created']]\n\tCAPortaldf['name'] = caOpenDataURL + \"dataset/\" + CAPortaldf['name']\n\n\t# Removing html tags from 'notes' column\n\t# Credit: https://medium.com/@jorlugaqui/how-to-strip-html-tags-from-a-string-in-python-7cb81a2bbf44\n\tclean = re.compile('<.*?>')\n\tfor i in range(len(CNRAdf['notes'])):\n\t CNRAdf['notes'][i] = re.sub(clean, '', CNRAdf['notes'][i])\n\tfor i in range(len(CAPortaldf['notes'])):\n\t CAPortaldf['notes'][i] = re.sub(clean, '', CAPortaldf['notes'][i])\n\n\t# Combine and drop duplicates\n\tcombined = pd.concat([CNRAdf, CAPortaldf]).sort_index(kind='merge')\n\tcombined.drop_duplicates(['title'],inplace=True)\n\tcombined = combined.reset_index()\n\tcombined = combined.drop(['index'], axis='columns')\n\n\t# end = time.time()\n\t# print(str(end-begin))\n\t# This code takes ~5.5s to run for 1000 results from each site\n\n\treturn combined\n" }, { "alpha_fraction": 0.645283043384552, "alphanum_fraction": 0.6830188632011414, "avg_line_length": 14.647058486938477, "blob_id": "a6cc8774598327efffff32155221f340785ba82e", "content_id": "29dbf8650b80a12e706c9f1e9887da41208f10d9", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 265, "license_type": "permissive", "max_line_length": 68, "num_lines": 17, "path": "/readme.md", "repo_name": "ryangdar/Ab1755-", "src_encoding": "UTF-8", "text": "#Simple Flask Form\n\n### Usage\n\n- Run the application and go to http://0.0.0.0:3000/ in your browser\n\n#### Configurations\n\n- To install pip: `sudo easy_install pip`\n- To install Flask: `pip install -r requirements.txt`\n\n\n#### Requirements\n\n- Python 2.7\n- pip\n- Flask" } ]
5
nhwhite3118/ThunderBuddy
https://github.com/nhwhite3118/ThunderBuddy
dd9e8ee13bb87274541d3ceb231c74c1c7b513d4
521d5dab1f4041f2b98ea1b333c177ebbc958036
0d33560bce2485afd88bbda3ee3e5f95d6d89e22
refs/heads/master
2021-01-21T04:53:46.622379
2016-06-23T05:06:19
2016-06-23T05:06:19
55,562,793
2
1
null
2016-04-06T00:14:12
2016-04-10T03:08:58
2016-04-10T19:44:52
CSS
[ { "alpha_fraction": 0.747922420501709, "alphanum_fraction": 0.7590027451515198, "avg_line_length": 35.099998474121094, "blob_id": "b7d2901da6530055496b27fa631976db3705b720", "content_id": "8c10fea45f5963a0f4f9bb47296f5f2cc44f5a26", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 361, "license_type": "no_license", "max_line_length": 68, "num_lines": 10, "path": "/README.md", "repo_name": "nhwhite3118/ThunderBuddy", "src_encoding": "UTF-8", "text": "# ThunderBuddy\nTexts subscribed users when a thunderstorm is coming. \nSubscribe at http://nhwhite3118.github.io/ThunderBuddy\n\nBuilt by Nick White and [Daniel Robertson](http://www.github.com/danielrobertson) \n\nPowered by these APIs: \n[WeatherUnderground](https://www.wunderground.com),\n[Twilio](https://www.twilio.com/), and\n[Gmail](https://developers.google.com/gmail/api/)\n" }, { "alpha_fraction": 0.6338062882423401, "alphanum_fraction": 0.6460371613502502, "avg_line_length": 31.44444465637207, "blob_id": "f2310956402e938cbe0d64318a757aea3faca1dc", "content_id": "3301ebbddaefec3924e1729cc421e9de035d3202", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4088, "license_type": "no_license", "max_line_length": 111, "num_lines": 126, "path": "/ThunderBuddyResource.py", "repo_name": "nhwhite3118/ThunderBuddy", "src_encoding": "UTF-8", "text": "import config\nfrom flask import Flask\nimport flask\nimport pymysql\nimport zipcode\nfrom twilio.rest.lookups import TwilioLookupsClient\nfrom tornado.wsgi import WSGIContainer\nfrom tornado.httpserver import HTTPServer\nfrom tornado.ioloop import IOLoop\nfrom datetime import datetime\n\napp = Flask(__name__)\n\ncarrierPortalLookup = {\n \"Verizon Wireless\": \"vtext.com\",\n \"Sprint Spectrum, L.P.\": \"messaging.sprintpcs.com\",\n \"AT&T Wireless\": \"txt.att.net\",\n}\n\n# twilio\nclient = TwilioLookupsClient()\nclient = TwilioLookupsClient()\n\n# database\n#conn = pymysql.connect(host='127.0.0.1', user=config.DB_USER, passwd=config.DB_PASSWORD, db='thunderbuddy')\n#cur = conn.cursor()\n\n\n# subscribes a user by inserting their number into the database\[email protected](\"/api/subscribe/number/<number>/zip/<zip>\", methods=[\"POST\"])\ndef subscribe(number, zip):\n conn = pymysql.connect(host='127.0.0.1', user=config.DB_USER, passwd=config.DB_PASSWORD, db='thunderbuddy')\n cur = conn.cursor()\n try:\n if len(str(int(number))) > 15:\n cur.close()\n conn.close()\n resp = flask.Response(\"Please input a valid number\", status=400)\n resp.headers[\"Access-Control-Allow-Origin\"] = \"*\"\n return resp \n except:\n cur.close()\n conn.close()\n resp = flask.Response(\"Please input a valid number\", status=400)\n resp.headers[\"Access-Control-Allow-Origin\"] = \"*\"\n return resp \n # allow users to change their location without duplicating in the bases\n cur.execute(\"DELETE FROM user WHERE number=\" + number)\n conn.commit()\n\n zipcodeInfo = zipcode.isequal(zip)\n numberInfo = client.phone_numbers.get(number, include_carrier_info=True)\n carrier = numberInfo.carrier['name']\n # Convert carrier to portal\n portal = \"\"\n if carrier in carrierPortalLookup:\n portal = carrierPortalLookup[carrier]\n else:\n cur.close()\n conn.close()\n resp = flask.Response(\"We are sorry, but ThunderBuddy does not support your carrier\", status=400)\n resp.headers[\"Access-Control-Allow-Origin\"] = \"*\"\n return resp\n sql = \"INSERT INTO user(number,city,state,carrier_portal) VALUES(%s,%s,%s,%s)\"\n 
city = str(zipcodeInfo.city).replace(\" \", \"_\")\n state = str(zipcodeInfo.state)\n v = (str(number), city, state, portal)\n print(str(datetime.now()) + \" Adding user - \" + carrier + \" \" + str(v))\n cur.execute(sql, v)\n conn.commit()\n\n resp = flask.Response(\"Subscribed \" + str(number), status=200)\n resp.headers[\"Access-Control-Allow-Origin\"] = \"*\"\n cur.close()\n conn.close()\n return resp\n\n\n# unsubscribes a user by removing their number from the database\[email protected](\"/api/unsubscribe/number/<number>\", methods=[\"POST\"])\ndef unsubscribe(number):\n conn = pymysql.connect(host='127.0.0.1', user=config.DB_USER, passwd=config.DB_PASSWORD, db='thunderbuddy')\n cur = conn.cursor()\n try:\n if len(str(int(number))) > 15:\n cur.close()\n conn.close()\n resp = flask.Response(\"Please input a valid number\", status=400)\n resp.headers[\"Access-Control-Allow-Origin\"] = \"*\"\n return resp\n except:\n cur.close()\n conn.close()\n resp = flask.Response(\"Please input a valid number\", status=400)\n resp.headers[\"Access-Control-Allow-Origin\"] = \"*\"\n return resp\n\n print(str(datetime.now()) + \" Removing user - \" + number)\n cur.execute(\"DELETE FROM user WHERE number=\" + number)\n conn.commit()\n\n resp = flask.Response(\"Unsubscribed \" + str(number), status=200)\n resp.headers[\"Access-Control-Allow-Origin\"] = \"*\"\n cur.close()\n conn.close()\n return resp\n\n\n# debugging helpers\[email protected](\"/\")\ndef hello():\n resp = flask.Response(\"Thunder sucks\", status=200)\n resp.headers[\"Access-Control-Allow-Origin\"] = \"*\"\n return resp\n\n\n\n\nif __name__ == \"__main__\":\n http_server = HTTPServer(WSGIContainer(app))\n http_server.listen(8000)\n print(str(datetime.now()) + \" Flask started...\")\n IOLoop.instance().start()\n\n#cur.close()\n#conn.close()\n" }, { "alpha_fraction": 0.6218810081481934, "alphanum_fraction": 0.6337172389030457, "avg_line_length": 32.25531768798828, "blob_id": "0a8134a00fb8af01c67d88cc6db1f9777b9735b9", "content_id": "6f04df65b6a526dfbad4ead0bb137014d058d8fc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3126, "license_type": "no_license", "max_line_length": 136, "num_lines": 94, "path": "/ThunderBuddy.py", "repo_name": "nhwhite3118/ThunderBuddy", "src_encoding": "UTF-8", "text": "import config\nfrom urllib.request import urlopen\nimport json\nimport pymysql\nimport time\nimport smtplib\n\n# We need to keep track of the number of unique cities we ask weatherunderground\n# for. 
We get 10 API calls/minute, and 500/day, so we need to keep totals\nwundergroundThisDay = 0\n# We need to cache cities so that our usage scales with unique cities instead\n# of unique users\nforecasts = {}\nconn = pymysql.connect(host=\"127.0.0.1\", user=config.DB_USER, passwd=config.DB_PASSWORD, db=\"thunderbuddy\")\n\n\ndef sendAlerts():\n print(\"time - \" + time.ctime())\n wundergroundThisMinute = 0\n cur = conn.cursor()\n cur.execute(\"SELECT * FROM user\")\n\n for user in cur:\n number = user[0]\n city = user[1]\n state = user[2]\n portal = user[3]\n if city + state not in forecasts:\n # find forecasts only for new cities\n print(\"Checking forecast for location - \" + city)\n wundergroundThisMinute += 1\n if wundergroundThisMinute >= 10:\n # Only make 10 api calls/min\n time.sleep(120)\n wundergroundThisMinute = 0\n forecasts[city + state] = makeForecast(city, state)\n\n if len(forecasts[city + state]) > 0:\n # if there is thunder, message the user\n sendEmailSms(number + \"@\" + portal, forecasts[city + state])\n\n\ndef makeForecast(city, state):\n # Retrieve Weather Underground Data\n f = urlopen(\"http://api.wunderground.com/api/\" + config.WEATHER_UNDERGROUND_API_KEY + \"/forecast/q/\" + state + \"/\" + city + \".json\")\n jsonString = f.read().decode('utf-8')\n parsedJson = json.loads(jsonString)\n forecastList = parsedJson[\"forecast\"][\"txt_forecast\"][\"forecastday\"]\n thunderDays = []\n\n message = \"\"\n for index in range(1, 3): \n day = forecastList[index][\"title\"]\n print(\"Checking forecast time frame - \" + day)\n if containsThunder(forecastList[index]):\n thunderDays.append(day)\n\n if len(thunderDays) > 0:\n # build message and replace last comma with 'and'\n message = \"Thunder coming on \" + \", \".join(thunderDays)[::-1].replace(\",\", \"dna \", 1)[::-1]\n\n f.close()\n return message\n\n\ndef containsThunder(forecastEntry):\n containsThunder = False \n fcttext = forecastEntry[\"fcttext\"].lower()\n fcttextMetric = forecastEntry[\"fcttext_metric\"].lower()\n if \"thunder\" in fcttext and \"thunder possible\" not in fcttext \\\n or \"thunder\" in fcttextMetric and \"thunder possible\" not in fcttextMetric:\n containsThunder = True\n\n return containsThunder\n\n\ndef sendEmailSms(recipient, body):\n gmailUser = '[email protected]'\n gmailPassword = config.EMAIL_PASSWORD\n to = recipient if type(recipient) is list else [recipient]\n\n try:\n server = smtplib.SMTP(\"smtp.gmail.com\", 587)\n server.ehlo()\n server.starttls()\n server.login(gmailUser, gmailPassword)\n server.sendmail(gmailUser, to, body)\n server.close()\n print(\"successfully sent email to - \" + str(to))\n except:\n print(\"failed to send email to - \" + str(to))\n\n\nsendAlerts()\n" } ]
3
mvantellingen/py-soap-wsse
https://github.com/mvantellingen/py-soap-wsse
3bf07a699a2a2bfe60b72a34767e0130d5103287
ae01285b9670fe98375312103c0bdf68ea2fe89d
4cf7ef1ff176360cfc1f88de1eace27d722a426d
refs/heads/master
2023-08-24T05:26:24.163025
2017-01-31T21:23:10
2017-01-31T21:23:10
30,937,586
7
13
MIT
2015-02-17T21:02:01
2017-10-27T07:43:42
2017-01-31T21:23:11
Python
[ { "alpha_fraction": 0.7019867300987244, "alphanum_fraction": 0.7019867300987244, "avg_line_length": 31.826086044311523, "blob_id": "6f9acc6a4a943a8dfb7d5b065f81f3212f53908f", "content_id": "af106642a8c6c24acddd49201d6dd5ffb2fbc83f", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 755, "license_type": "permissive", "max_line_length": 78, "num_lines": 23, "path": "/src/soap_wsse/suds_plugin.py", "repo_name": "mvantellingen/py-soap-wsse", "src_encoding": "UTF-8", "text": "try:\n from suds.plugin import MessagePlugin\nexcept ImportError:\n raise ImportError(\"The suds WssePlugin requires suds to be installed\")\n\nfrom soap_wsse import sign_envelope, verify_envelope\nfrom soap_wsse.signing import CertificationError\n\n\nclass WssePlugin(MessagePlugin):\n \"\"\"Suds plugin to sign soap requests with a certificate\"\"\"\n\n def __init__(self, filename):\n self.cert_filename = filename\n\n def sending(self, context):\n context.envelope = sign_envelope(context.envelope, self.cert_filename)\n\n def received(self, context):\n if context.reply:\n valid = verify_envelope(context.reply, self.cert_filename)\n if not valid:\n raise CertificationError(\"Failed to verify response\")\n" }, { "alpha_fraction": 0.8095238208770752, "alphanum_fraction": 0.8095238208770752, "avg_line_length": 62, "blob_id": "3b015e46a560ed7454d8664f17d2ae21b3d1d3fa", "content_id": "8502320f45ceeea0f47811349e0fa8537abb6dee", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 126, "license_type": "permissive", "max_line_length": 68, "num_lines": 2, "path": "/src/soap_wsse/__init__.py", "repo_name": "mvantellingen/py-soap-wsse", "src_encoding": "UTF-8", "text": "from soap_wsse.signing import sign_envelope, verify_envelope # NOQA\nfrom soap_wsse.signing import CertificationError # NOQA\n" }, { "alpha_fraction": 0.6178942322731018, "alphanum_fraction": 0.692782998085022, "avg_line_length": 44.46067428588867, "blob_id": "6798f1df97d66dcd17c5b9daf0bd5ac2abe341e9", "content_id": "c1e7fca1f9119dd3db2f9725800837b79ec7464e", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4046, "license_type": "permissive", "max_line_length": 371, "num_lines": 89, "path": "/tests/test_suds_plugin.py", "repo_name": "mvantellingen/py-soap-wsse", "src_encoding": "UTF-8", "text": "import os.path\n\nimport pytest\nfrom pretend import stub\n\nfrom soap_wsse import CertificationError\nfrom soap_wsse import suds_plugin\n\n\nKEY_FILE = os.path.join(\n os.path.dirname(os.path.realpath(__file__)), 'soap_wsse_keys.pem')\n\n\nXML=\"\"\"\n<soapenv:Envelope xmlns:mvt=\"http://github.com/mvantellingen\"\n xmlns:wsdl=\"http://schemas.xmlsoap.org/wsdl/\"\n xmlns:soapenv=\"http://schemas.xmlsoap.org/soap/envelope/\"\n xmlns:soap=\"http://schemas.xmlsoap.org/wsdl/soap/\">\n <soapenv:Header></soapenv:Header>\n <soapenv:Body>\n <mvt:Function>\n <mvt:Argument>OK</mvt:Argument>\n </mvt:Function>\n </soapenv:Body>\n</soapenv:Envelope>\n\"\"\".strip()\n\nXML_SIGNED = \"\"\"\n<soapenv:Envelope xmlns:mvt=\"http://github.com/mvantellingen\" xmlns:wsdl=\"http://schemas.xmlsoap.org/wsdl/\" xmlns:soapenv=\"http://schemas.xmlsoap.org/soap/envelope/\" xmlns:soap=\"http://schemas.xmlsoap.org/wsdl/soap/\">\n <soapenv:Header>\n <wsse:Security xmlns:wsse=\"http://docs.oasis-open.org/wss/2004/01/oasis-200401-wss-wssecurity-secext-1.0.xsd\" 
xmlns:wsu=\"http://docs.oasis-open.org/wss/2004/01/oasis-200401-wss-wssecurity-utility-1.0.xsd\">\n <wsse:BinarySecurityToken wsu:Id=\"id-e173afc8-2e7f-4d37-9ef5-160b84e210ab\" EncodingType=\"http://docs.oasis-open.org/wss/2004/01/oasis-200401-wss-soap-message-security-1.0#Base64Binary\" ValueType=\"http://docs.oasis-open.org/wss/2004/01/oasis-200401-wss-x509-token-profile-1.0#X509v3\">MIIEgzCCA2ugAwIBAgIJAMkiZttOMvKxMA0GCSqGSIb3DQEBBQUAMIGHMQswCQYDVQQGEwJOTDETMBEGA1UECBMKR2VsZGVybGFuZDEMMAoGA1UEBxMDRWRlMRAwDgYDVQQKEwdMdWtraWVuMRgwFgYDVQQDEw9wb24ubHVra2llbi5jb20xKTAnBgkqhkiG9w0BCQEWGm0udmFudGVsbGluZ2VuQGx1a2tpZW4uY29tMB4XDTE1MDEyOTE2MDk0OVoXDTE3MTAyNTE2MDk0OVowgYcxCzAJBgNVBAYTAk5MMRMwEQYDVQQIEwpHZWxkZXJsYW5kMQwwCgYDVQQHEwNFZGUxEDAOBgNVBAoTB0x1a2tpZW4xGDAWBgNVBAMTD3Bvbi5sdWtraWVuLmNvbTEpMCcGCSqGSIb3DQEJARYabS52YW50ZWxsaW5nZW5AbHVra2llbi5jb20wggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDC/R9s5eApMoc+B9G9IklGtcjkOAftHmntTrbKGACz9dTRIURCeqtuJuEvvvpAarnFftIb39tAvdrwkZIkmaAUowcD7OHsOJlsojNY7vYxju8Clx45XJ9HcvL6EkHpd2lRuLx9dSfwTfGKtnTaFhJt8ZnLjNSbWK/5IHHvrv/gYtDKhahK6ncBW2k5nXZf6Wn2Rn/RpjkSoQL12Gmyh47EXMvbA9HpxxqDlPWBBNp6hpGhGOkg0EVvRgnKwzkqvTyB6571LsNMVe7U+gkmd9GHxx8t3cloWLG9RSD/Qr0ahQpFPI00c4dHN0a1LG4WCzbFN8mZYr3WSqsx1OUY1eHJAgMBAAGjge8wgewwHQYDVR0OBBYEFKicd7yRgkGBJJoO/s49iSF3N91CMIG8BgNVHSMEgbQwgbGAFKicd7yRgkGBJJoO/s49iSF3N91CoYGNpIGKMIGHMQswCQYDVQQGEwJOTDETMBEGA1UECBMKR2VsZGVybGFuZDEMMAoGA1UEBxMDRWRlMRAwDgYDVQQKEwdMdWtraWVuMRgwFgYDVQQDEw9wb24ubHVra2llbi5jb20xKTAnBgkqhkiG9w0BCQEWGm0udmFudGVsbGluZ2VuQGx1a2tpZW4uY29tggkAySJm204y8rEwDAYDVR0TBAUwAwEB/zANBgkqhkiG9w0BAQUFAAOCAQEAo43YKvwGepefY3mazx+PUa5OCozHHNtvpZpXRtN/3bwggXZdJqTyJjmlEQBZz/yAyJL5Ar8FtMenR4Ki8E9Esn09L/l2rA0JvLP8IMBZHfqdDM2Za5zJwp541y4jRjcNlVJ57bwby3DlBp8u70wrtGp8PNC1cGLr9Wj61mERjQAeIn4Qv8JBKiuvQ+YiHN5x1baOCxOWYGFlukXiGDcnNse0BC144yBJraoLzCI6VQYSws1n33VyjPPySpDeR2/JdS6ZO1E/yhuqqwXt8p3g3i7brkpxbQYYeBQA0idDBO7bVQOBoXYGbIN2AIrkPnm4zkki1kivh5NP4PakO6TPDQ==</wsse:BinarySecurityToken>\n <Signature xmlns=\"http://www.w3.org/2000/09/xmldsig#\">\n <SignedInfo>\n <CanonicalizationMethod Algorithm=\"http://www.w3.org/2001/10/xml-exc-c14n#\"/>\n <SignatureMethod Algorithm=\"http://www.w3.org/2000/09/xmldsig#rsa-sha1\"/>\n <Reference URI=\"#id-b88aa1ed-1b06-4b40-b110-5c239a6b5ce3\">\n <Transforms>\n <Transform Algorithm=\"http://www.w3.org/2001/10/xml-exc-c14n#\">\n <ec:InclusiveNamespaces PrefixList=\"urn\" xmlns:ec=\"http://www.w3.org/2001/10/xml-exc-c14n#\"/>\n </Transform>\n </Transforms>\n <DigestMethod Algorithm=\"http://www.w3.org/2000/09/xmldsig#sha1\"/>\n <DigestValue>NRPhamBfgxgTWPf6+8Xzga+YMtg=</DigestValue>\n </Reference>\n </SignedInfo>\n <SignatureValue>ms8rrvFl12PTyhZFBBI6l5T9wCCljGamaiVzGS3HcTGw+gQ5YwHSPSuwAAxhSXKi\nkrHVN0zEw6p0HZC5RJR/XFrCLPScsrDQOjIP2pKU3uuKR7wFLFDzygS5yU7qpuSn\nTdoRuqyOgT5lgvDlDBXn534cQHW3yOUVUl3u+QWdrDtVehag17JBDA89db5KiOfW\nReaTUxHjYmYJaEtY4HLr9PGvtS4pEpr+FCvHoq0aKI40BqyGar4G1/8tavWGaFV0\nvMSZrBGvRxqq6Gotjjt47LhlYq3JvpLLLi+SOjF5388LMKZ+gLLapIY0OhvnGEvl\nJvsAu49CHgPAlLdF3wvIgA==</SignatureValue>\n <KeyInfo>\n <wsse:SecurityTokenReference wsse:TokenType=\"http://docs.oasis-open.org/wss/2004/01/oasis-200401-wss-x509-token-profile-1.0#X509v3\">\n <wsse:Reference ValueType=\"http://docs.oasis-open.org/wss/2004/01/oasis-200401-wss-x509-token-profile-1.0#X509v3\" URI=\"#id-e173afc8-2e7f-4d37-9ef5-160b84e210ab\"/>\n </wsse:SecurityTokenReference>\n </KeyInfo>\n </Signature>\n </wsse:Security>\n </soapenv:Header>\n <soapenv:Body ns0:Id=\"id-b88aa1ed-1b06-4b40-b110-5c239a6b5ce3\" 
xmlns:ns0=\"http://docs.oasis-open.org/wss/2004/01/oasis-200401-wss-wssecurity-utility-1.0.xsd\">\n <mvt:Function>\n <mvt:Argument>OK</mvt:Argument>\n </mvt:Function>\n </soapenv:Body>\n</soapenv:Envelope>\n\"\"\"\n\n\ndef test_plugin_sending():\n plugin = suds_plugin.WssePlugin(KEY_FILE)\n\n context = stub(envelope=XML)\n plugin.sending(context)\n\ndef test_plugin_received():\n plugin = suds_plugin.WssePlugin(KEY_FILE)\n\n context = stub(reply=XML_SIGNED)\n with pytest.raises(CertificationError):\n plugin.received(context)\n\n\ndef test_plugin_received_without_signature():\n plugin = suds_plugin.WssePlugin(KEY_FILE)\n\n context = stub(reply=XML)\n with pytest.raises(CertificationError):\n plugin.received(context)\n" }, { "alpha_fraction": 0.7231149673461914, "alphanum_fraction": 0.7231149673461914, "avg_line_length": 25.09677505493164, "blob_id": "e029c119eeca138621d910545877cd26a4d4088a", "content_id": "dbe691d1ff24264ebd7aac2c563cd8ab3913d437", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 809, "license_type": "permissive", "max_line_length": 100, "num_lines": 31, "path": "/README.rst", "repo_name": "mvantellingen/py-soap-wsse", "src_encoding": "UTF-8", "text": "!! This module is longer supported by me, please use http://docs.python-zeep.org/en/master/wsse.html\n\n\nSOAP-WSSE\n=========\n\nSimply library to sign and verify SOAP XML requests using the\nBinarySecurityToken specification.\n\nImprovements welcome :-)\n\n\n\nContinuous integration status:\n\n.. image:: https://travis-ci.org/mvantellingen/py-soap-wsse.svg?branch=master\n :target: https://travis-ci.org/mvantellingen/py-soap-wsse\n\n\n.. image:: http://codecov.io/github/mvantellingen/py-soap-wsse/coverage.svg?branch=master\n :alt: Coverage\n :target: https://codecov.io/github/mvantellingen/py-soap-wsse\n \n.. 
image:: https://pypip.in/version/soap_wsse/badge.svg\n :target: https://pypi.python.org/pypi/soap_wsse/\n\n\nReferences\n----------\n - http://vsza.hu/thesis-beta.pdf\n - https://github.com/dnet/SudsSigner\n" }, { "alpha_fraction": 0.6777658462524414, "alphanum_fraction": 0.6777658462524414, "avg_line_length": 25.600000381469727, "blob_id": "b717da9558b4b6451fcd1293f4c795de885e9298", "content_id": "d7e5251a6fded54428f93b0f772fbd5d8c073ee5", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 931, "license_type": "permissive", "max_line_length": 70, "num_lines": 35, "path": "/tests/test_soap_wsse.py", "repo_name": "mvantellingen/py-soap-wsse", "src_encoding": "UTF-8", "text": "import os\nimport soap_wsse\n\n\nKEY_FILE = os.path.join(\n os.path.dirname(os.path.realpath(__file__)), 'soap_wsse_keys.pem')\n\n\nXML=\"\"\"\n<soapenv:Envelope xmlns:mvt=\"http://github.com/mvantellingen\"\n xmlns:wsdl=\"http://schemas.xmlsoap.org/wsdl/\"\n xmlns:soapenv=\"http://schemas.xmlsoap.org/soap/envelope/\"\n xmlns:soap=\"http://schemas.xmlsoap.org/wsdl/soap/\">\n <soapenv:Header></soapenv:Header>\n <soapenv:Body>\n <mvt:Function>\n <mvt:Argument>OK</mvt:Argument>\n </mvt:Function>\n </soapenv:Body>\n</soapenv:Envelope>\n\"\"\".strip()\n\n\ndef test_sign():\n signed_xml = soap_wsse.sign_envelope(XML, KEY_FILE)\n result = soap_wsse.verify_envelope(signed_xml, KEY_FILE)\n assert result is True\n\n\ndef test_sign_failed():\n signed_xml = soap_wsse.sign_envelope(XML, KEY_FILE)\n signed_xml = signed_xml.replace('OK', 'NOT OK!')\n\n result = soap_wsse.verify_envelope(signed_xml, KEY_FILE)\n assert result is False\n" }, { "alpha_fraction": 0.5652921199798584, "alphanum_fraction": 0.6649484634399414, "avg_line_length": 71.625, "blob_id": "5c1d2ba0479fac2e45c69d88fe33c1e7709c22c6", "content_id": "8dbb14fda6d6464e2865a9b1bbc85e5f664f1cb5", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 582, "license_type": "permissive", "max_line_length": 110, "num_lines": 8, "path": "/src/soap_wsse/ns.py", "repo_name": "mvantellingen/py-soap-wsse", "src_encoding": "UTF-8", "text": "\ndsns = ('ds', 'http://www.w3.org/2000/09/xmldsig#') # NOQA\necns = ('ec', 'http://www.w3.org/2001/10/xml-exc-c14n#') # NOQA\nenvns = ('SOAP-ENV', 'http://schemas.xmlsoap.org/soap/envelope/') # NOQA\nwssens = ('wsse', 'http://docs.oasis-open.org/wss/2004/01/oasis-200401-wss-wssecurity-secext-1.0.xsd') # NOQA\nwssns = ('wss', 'http://docs.oasis-open.org/wss/2004/01/oasis-200401-wss-soap-message-security-1.0#') # NOQA\nwsuns = ('wsu', 'http://docs.oasis-open.org/wss/2004/01/oasis-200401-wss-wssecurity-utility-1.0.xsd') # NOQA\n\nNSMAP = dict((envns, dsns, wssens, wsuns, wssns))\n" }, { "alpha_fraction": 0.5705196261405945, "alphanum_fraction": 0.5959703326225281, "avg_line_length": 22, "blob_id": "3d4101a7448ccf0b57d96673619b17259d96b5fb", "content_id": "3eb0b5f50e023561a399819db3ccd74c4b3222c6", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 943, "license_type": "permissive", "max_line_length": 61, "num_lines": 41, "path": "/setup.py", "repo_name": "mvantellingen/py-soap-wsse", "src_encoding": "UTF-8", "text": "from setuptools import find_packages, setup\n\n\ndescription = \"\"\"\nSimply library to sign and verify SOAP XML requests using the\nBinarySecurityToken specification.\n\"\"\".strip()\n\ntests_require = [\n 'pytest>=2.6.0',\n 'pytest-cov>=1.7.0',\n 
'pretend>=1.0.0',\n 'suds-jurko>=0.6',\n]\n\nsetup(\n name='soap_wsse',\n version='0.2.2',\n description=description,\n url='https://github.com/mvantellingen/py-soap-wsse',\n author=\"Michael van Tellingen\",\n author_email=\"[email protected]\",\n install_requires=[\n 'dm.xmlsec.binding==1.3.2',\n 'lxml>=3.0.0',\n 'pyOpenSSL>=0.14',\n ],\n tests_require=tests_require,\n extras_require={'test': tests_require},\n entry_points={\n },\n package_dir={'': 'src'},\n packages=find_packages('src'),\n include_package_data=True,\n license='MIT',\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'License :: OSI Approved :: MIT License',\n ],\n zip_safe=False,\n)\n" }, { "alpha_fraction": 0.6297422647476196, "alphanum_fraction": 0.6456704139709473, "avg_line_length": 28.89610481262207, "blob_id": "e1f5eb9347c5c99b53b28d42b15411f2762b7a18", "content_id": "aac74a73f6624ddfdf2e6ff228f862e334b0a89c", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6906, "license_type": "permissive", "max_line_length": 79, "num_lines": 231, "path": "/src/soap_wsse/signing.py", "repo_name": "mvantellingen/py-soap-wsse", "src_encoding": "UTF-8", "text": "\"\"\"\n soap_wsse.signing\n ~~~~~~~~~~~~~~~~~\n\n Library to sign SOAP requests with WSSE tokens.\n\n\"\"\"\nimport logging\nimport base64\nimport os\nfrom uuid import uuid4\n\nimport dm.xmlsec.binding as xmlsec\nfrom dm.xmlsec.binding.tmpl import Signature\nfrom lxml import etree\nfrom OpenSSL import crypto\n\nfrom soap_wsse import ns\n\n\nlogger = logging.getLogger(__name__)\n\n\nBODY_XPATH = etree.XPath(\n '/SOAP-ENV:Envelope/SOAP-ENV:Body', namespaces=ns.NSMAP)\nHEADER_XPATH = etree.XPath(\n '/SOAP-ENV:Envelope/SOAP-ENV:Header', namespaces=ns.NSMAP)\nSECURITY_XPATH = etree.XPath('wsse:Security', namespaces=ns.NSMAP)\nTIMESTAMP_XPATH = etree.XPath('wsu:Timestamp', namespaces=ns.NSMAP)\n\nC14N = 'http://www.w3.org/2001/10/xml-exc-c14n#'\nXMLDSIG_SHA1 = 'http://www.w3.org/2000/09/xmldsig#sha1'\n\n\ndef ns_id(tagname, suds_ns):\n return '{{{0}}}{1}'.format(suds_ns[1], tagname)\n\nWSU_ID = ns_id('Id', ns.wsuns)\nBINARY_TOKEN_TYPE = (\n 'http://docs.oasis-open.org/wss/2004/01/' +\n 'oasis-200401-wss-x509-token-profile-1.0#X509v3')\n\n\ndef log_errors(filename, line, func, errorObject, errorSubject, reason, msg):\n info = []\n if errorObject != 'unknown':\n info.append('obj=' + errorObject)\n if errorSubject != 'unknown':\n info.append('subject=' + errorSubject)\n if msg.strip():\n info.append('msg=' + msg)\n if info:\n logger.debug('%s:%d(%s)' % (filename, line, func), ' '.join(info))\n\n\nclass CertificationError(Exception):\n pass\n\n\n# Initialize the xmlsec library\nxmlsec.initialize()\nxmlsec.set_error_callback(log_errors)\n\n\nclass SignQueue(object):\n WSU_ID = ns_id('Id', ns.wsuns)\n DS_DIGEST_VALUE = ns_id('DigestValue', ns.dsns)\n DS_REFERENCE = ns_id('Reference', ns.dsns)\n DS_TRANSFORMS = ns_id('Transforms', ns.dsns)\n\n def __init__(self):\n self.queue = []\n\n def push_and_mark(self, element):\n unique_id = get_unique_id()\n element.set(self.WSU_ID, unique_id)\n self.queue.append(unique_id)\n\n def insert_references(self, signature):\n signed_info = signature.find('ds:SignedInfo', namespaces=ns.NSMAP)\n nsmap = {ns.ecns[0]: ns.ecns[1]}\n\n for element_id in self.queue:\n reference = etree.SubElement(\n signed_info, self.DS_REFERENCE,\n {'URI': '#{0}'.format(element_id)})\n transforms = etree.SubElement(reference, self.DS_TRANSFORMS)\n node = set_algorithm(transforms, 'Transform', C14N)\n\n elm 
= _create_element(node, 'ec:InclusiveNamespaces', nsmap)\n elm.set('PrefixList', 'urn')\n\n set_algorithm(reference, 'DigestMethod', XMLDSIG_SHA1)\n etree.SubElement(reference, self.DS_DIGEST_VALUE)\n\n\ndef sign_envelope(envelope, key_file):\n \"\"\"Sign the given soap request with the given key\"\"\"\n doc = etree.fromstring(envelope)\n body = get_body(doc)\n\n queue = SignQueue()\n queue.push_and_mark(body)\n\n security_node = ensure_security_header(doc, queue)\n security_token_node = create_binary_security_token(key_file)\n signature_node = Signature(\n xmlsec.TransformExclC14N, xmlsec.TransformRsaSha1)\n\n security_node.append(security_token_node)\n security_node.append(signature_node)\n queue.insert_references(signature_node)\n\n key_info = create_key_info_node(security_token_node)\n signature_node.append(key_info)\n\n # Sign the generated xml\n xmlsec.addIDs(doc, ['Id'])\n dsigCtx = xmlsec.DSigCtx()\n dsigCtx.signKey = xmlsec.Key.load(key_file, xmlsec.KeyDataFormatPem, None)\n dsigCtx.sign(signature_node)\n return etree.tostring(doc)\n\n\ndef verify_envelope(reply, key_file):\n \"\"\"Verify that the given soap request is signed with the certificate\"\"\"\n doc = etree.fromstring(reply)\n node = doc.find(\".//{%s}Signature\" % xmlsec.DSigNs)\n if node is None:\n raise CertificationError(\"No signature node found\")\n dsigCtx = xmlsec.DSigCtx()\n\n xmlsec.addIDs(doc, ['Id'])\n signKey = xmlsec.Key.load(key_file, xmlsec.KeyDataFormatPem)\n signKey.name = os.path.basename(key_file)\n\n dsigCtx.signKey = signKey\n try:\n dsigCtx.verify(node)\n except xmlsec.VerificationError:\n return False\n return True\n\n\ndef get_unique_id():\n return 'id-{0}'.format(uuid4())\n\n\ndef set_algorithm(parent, name, value):\n return etree.SubElement(parent, ns_id(name, ns.dsns), {'Algorithm': value})\n\n\ndef get_body(envelope):\n (body,) = BODY_XPATH(envelope)\n return body\n\n\ndef create_key_info_node(security_token):\n \"\"\"Create the KeyInfo node for WSSE.\n\n Note that this currently only supports BinarySecurityTokens\n\n Example of the generated XML:\n\n <ds:KeyInfo Id=\"KI-24C56C5B3448F4BE9D141094243396829\">\n <wsse:SecurityTokenReference\n wsse11:TokenType=\"{{ BINARY_TOKEN_TYPE }}\">\n <wsse:Reference\n URI=\"#X509-24C56C5B3448F4BE9D141094243396828\"\n ValueType=\"{{ BINARY_TOKEN_TYPE }}\"/>\n </wsse:SecurityTokenReference>\n </ds:KeyInfo>\n\n \"\"\"\n key_info = etree.Element(ns_id('KeyInfo', ns.dsns))\n\n sec_token_ref = etree.SubElement(\n key_info, ns_id('SecurityTokenReference', ns.wssens))\n sec_token_ref.set(\n ns_id('TokenType', ns.wssens), security_token.get('ValueType'))\n\n reference = etree.SubElement(sec_token_ref, ns_id('Reference', ns.wssens))\n reference.set('ValueType', security_token.get('ValueType'))\n reference.set('URI', '#%s' % security_token.get(WSU_ID))\n return key_info\n\n\ndef create_binary_security_token(key_file):\n \"\"\"Create the BinarySecurityToken node containing the x509 certificate.\n\n \"\"\"\n node = etree.Element(\n ns_id('BinarySecurityToken', ns.wssens),\n nsmap={ns.wssens[0]: ns.wssens[1]})\n node.set(ns_id('Id', ns.wsuns), get_unique_id())\n node.set('EncodingType', ns.wssns[1] + 'Base64Binary')\n node.set('ValueType', BINARY_TOKEN_TYPE)\n\n with open(key_file) as fh:\n cert = crypto.load_certificate(crypto.FILETYPE_PEM, fh.read())\n node.text = base64.b64encode(\n crypto.dump_certificate(crypto.FILETYPE_ASN1, cert))\n return node\n\n\ndef ensure_security_header(envelope, queue):\n \"\"\"Insert a security XML node if it doesn't exist otherwise update 
it.\n\n \"\"\"\n (header,) = HEADER_XPATH(envelope)\n security = SECURITY_XPATH(header)\n if security:\n for timestamp in TIMESTAMP_XPATH(security[0]):\n queue.push_and_mark(timestamp)\n return security[0]\n else:\n nsmap = {\n 'wsu': ns.wsuns[1],\n 'wsse': ns.wssens[1],\n }\n return _create_element(header, 'wsse:Security', nsmap)\n\n\ndef _create_element(parent, name, nsmap):\n prefix, name = name.split(':', 1)\n tag_name = '{%s}%s' % (nsmap[prefix], name)\n\n if parent is not None:\n return etree.SubElement(parent, tag_name, nsmap=nsmap)\n else:\n return etree.Element(tag_name, nsmap=nsmap)\n" } ]
8
CS-UTEC/tarea1-github-yumilr
https://github.com/CS-UTEC/tarea1-github-yumilr
d7667ed0a40f35492b23bf27f9fe66212b83505f
f071c18b1cb8baa7a06af70410a95b3e8c3107e2
e87209d82418234fce524970aacd5ed0451bfee0
refs/heads/master
2022-04-24T20:32:33.484702
2020-04-17T18:03:23
2020-04-17T18:03:23
255,245,122
0
0
null
2020-04-13T06:06:13
2020-04-13T23:30:20
2020-04-13T23:39:43
Python
[ { "alpha_fraction": 0.32258063554763794, "alphanum_fraction": 0.6129032373428345, "avg_line_length": 14.5, "blob_id": "9e9516b70a30229a2c387cc9ab6538672149e601", "content_id": "d45301cded433dd11d4b20e19d6a6353a7c4f8b9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 31, "license_type": "no_license", "max_line_length": 17, "num_lines": 2, "path": "/README.md", "repo_name": "CS-UTEC/tarea1-github-yumilr", "src_encoding": "UTF-8", "text": "# Lapa Romero, Julisa.\n## 201910200\n" }, { "alpha_fraction": 0.5550661087036133, "alphanum_fraction": 0.5682819485664368, "avg_line_length": 19.636363983154297, "blob_id": "acf86691d7657585eab275bc20af14f2a4a56332", "content_id": "11df517a63ad52157c0b2f9c53584820c33e5ffc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 231, "license_type": "no_license", "max_line_length": 36, "num_lines": 11, "path": "/primo.py", "repo_name": "CS-UTEC/tarea1-github-yumilr", "src_encoding": "UTF-8", "text": "n=int(input('Ingrese número: '))\nif n==0:\n print('El número no es admitido')\nelse:\n for i in range (2,n-1):\n if n/i:\n print('El número no es primo')\n break\n else:\n print('El número es primo')\n break\n" }, { "alpha_fraction": 0.6307692527770996, "alphanum_fraction": 0.6307692527770996, "avg_line_length": 20.66666603088379, "blob_id": "f3d9bb6bd27d20fc4a674c40e2997a0893397bfe", "content_id": "43f9ebe9daf300575d1071efb99a5514fdb4ce83", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 65, "license_type": "no_license", "max_line_length": 25, "num_lines": 3, "path": "/hello.py", "repo_name": "CS-UTEC/tarea1-github-yumilr", "src_encoding": "UTF-8", "text": "print('Ingrese nombre: ')\nname= input()\nprint('Hello '+name+'!')\n" } ]
3
JarCreationCityU/ServerDemo
https://github.com/JarCreationCityU/ServerDemo
1c43f6166271c3eaf298bec51d8d16a848a1d8b2
f5bd6bc9f568c893bee15d1c7d81b30c198f7350
d8a3304a4aef321ecdda581b0441b31ced093791
refs/heads/master
2020-04-07T01:56:15.257891
2018-11-17T11:00:54
2018-11-17T11:00:54
157,957,729
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7872340679168701, "alphanum_fraction": 0.8085106611251831, "avg_line_length": 31.538461685180664, "blob_id": "80f805272163caafed272b08849710e88aec0bf4", "content_id": "a0d4ace79e523383ced155e1ba5e66833a4824c7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 423, "license_type": "no_license", "max_line_length": 83, "num_lines": 13, "path": "/README.md", "repo_name": "JarCreationCityU/ServerDemo", "src_encoding": "UTF-8", "text": "# ServerDemo\nServer python programming demo\n\n\nThis is a demo created long time ago, this used for distribution before.\n\nExplaination:\n\naccess var is a port \nhttp server library and socketserver library used as there are 2 modules imported. \nHandler used to take control over all request\nport is equal to 8000 which mean this connection is go through with port 8000\nwhole code structure is using those two library mechanics\n" }, { "alpha_fraction": 0.7677724957466125, "alphanum_fraction": 0.7867298722267151, "avg_line_length": 22.44444465637207, "blob_id": "6df9cd7cbb64a691fde89788ff58080db884218b", "content_id": "8d272a1c80abe8a8b5d1d34a980c1706d4c63ec8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 211, "license_type": "no_license", "max_line_length": 52, "num_lines": 9, "path": "/ServerDemo.py", "repo_name": "JarCreationCityU/ServerDemo", "src_encoding": "UTF-8", "text": "import http.server\nimport socketserver\n\nACCESS = 8000\n\nHandler = http.server.SimpleHTTPRequestHandler\nhttpd = socketserver.TCPServer((\"\",ACCESS), Handler)\nprint(\"Server at ACCESS\" ,ACCESS)\nhttpd.serve_forever()\n" } ]
2
gajduk/MendixPointsReport
https://github.com/gajduk/MendixPointsReport
ff9ac111f75e45b5853be42e4af74218239b157e
3306637c13f66011e78ef8645a1613161d491fae
3c630a3f2398b5a17de0fee08555c381fb349fe9
refs/heads/master
2021-01-21T13:29:34.966516
2017-11-06T16:20:10
2017-11-06T16:20:10
102,129,230
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5939086079597473, "alphanum_fraction": 0.5939086079597473, "avg_line_length": 27.14285659790039, "blob_id": "04e4e577c8a99af7924560ccbfeda8a1f51ea618", "content_id": "34b25d89b1d465bb47af7e4ea40a2c4df16c208e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 197, "license_type": "no_license", "max_line_length": 63, "num_lines": 7, "path": "/src/logger.py", "repo_name": "gajduk/MendixPointsReport", "src_encoding": "UTF-8", "text": "from time import strftime,gmtime\n\ndef log(msg,error=False):\n separator = \"!!ERROR!! : \"\n if not error:\n separator = \":\"\n print strftime(\"%Y-%m-%d %H:%M:%S\", gmtime()),separator,msg\n" }, { "alpha_fraction": 0.5886340141296387, "alphanum_fraction": 0.5959332585334778, "avg_line_length": 30.459016799926758, "blob_id": "93118508d65055607dfdfe69c903f86791fce41f", "content_id": "acc45e10970471bec61751a2c2604dd8b8697d93", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 1918, "license_type": "no_license", "max_line_length": 127, "num_lines": 61, "path": "/src/phantomjs/phantom_getuserstats.js", "repo_name": "gajduk/MendixPointsReport", "src_encoding": "UTF-8", "text": "var system = require('system');\nvar args = system.args;\nif (args.length !== 2) {\n console.error('DisplayName of user expected as only argument.');\n phantom.exit(1);\n}\nvar page = require('webpage').create();\npage.onCallback = function(data) {\n\tif ( data ) {\n\t\tconsole.log(JSON.stringify(data));\n \t\tphantom.exit();\n\t}\n\telse {\n\t\tconsole.error(\"failed while querying data.\");\n \t\tphantom.exit(1);\n\t}\n};\npage.onConsoleMessage = function(msg, lineNum, sourceId) {\n// console.debug('CONSOLE: ' + msg + ' (from line #' + lineNum + ' in \"' + sourceId + '\")');\n};\n//console.debug(\"started \"+args[1]);\nvar attributes = [\"TotalCommunityPoints\",\"TotalForumPoints\",\"TotalAppstorePoints\",\"TotalPlatformPoints\",\"TotalLearningPoints\"];\nvar attributesSimpleNames = [\"Community\",\"Forum\",\"Appstore\",\"Platform\",\"Learning\"];\npage.open('https://developer.mendixcloud.com/link/community', function(status) {\n if (status === \"success\") {\n\t // console.debug(\"opened\");\n\t setTimeout(function() {\n//\t \tconsole.debug(\"injecting\");\n\t \tpage.evaluate(function(displayName,attributes,attributesSimpleNames) {\n \t\t\tmx.data.get( {\n\t\t\t xpath: \"//Gamification.User[DisplayName='\"+displayName+\"']\",\n\t\t\t filter: {\n\t\t\t sort: [[\"Name\", \"asc\"]],\n\t\t\t offset: 0,\n\t\t\t amount: 1,\n\t\t\t attributes: attributes\n\t\t\t },\n\t\t\t callback: function(objs) {\n\t\t\t \t\tif (typeof window.callPhantom === 'function') {\n\t\t\t \t\t\ttry {\n\t\t\t \t\t\t\tvar res = {};\n\t\t\t \t\t\t\tfor ( var i = 0 ; i < attributes.length ; ++i ) {\n\t\t\t \t\t\t\t\tres[attributesSimpleNames[i]] = +objs[0].get(attributes[i]);\n\t\t\t \t\t\t\t}\n\t\t\t \t\t\t\twindow.callPhantom(res);\n\t\t\t \t\t\t}\n\t\t\t \t\t\tcatch (ignored_error){\n\t\t\t \t\t\t\twindow.callPhantom(); \n\t\t\t \t\t\t}\n\t\t\t \t\t}\n\t\t\t }\n\t\t\t});\n\t \t},args[1],attributes,attributesSimpleNames);\n//\t \tconsole.debug(\"done injecting\");\n\t },3000)\n\t}\n\telse {\n\t\tconsole.error(\"Could not open page.\");\n \t\tphantom.exit(1);\n\t}\n});" }, { "alpha_fraction": 0.424783855676651, "alphanum_fraction": 0.5060518980026245, "avg_line_length": 21.532466888427734, "blob_id": "028f58d1430a101c2ad0370d5e6294b2c5a3980a", "content_id": 
"b8a06c8b87d66d68f5615f7b6f49618dc426d29d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1735, "license_type": "no_license", "max_line_length": 59, "num_lines": 77, "path": "/src/create_mock_data.py", "repo_name": "gajduk/MendixPointsReport", "src_encoding": "UTF-8", "text": "from query_points import QueryPoints\nfrom phantomjs.query_points_with_phantomjs import MockQuery\nfrom database_interface import MongoDBInterface\nfrom dates import DatesWithOffset\n\ndef addRecord(Points,DayOffset):\n db = MongoDBInterface(DatesWithOffset(DayOffset))\n query = MockQuery(Points)\n QueryPoints(db,query).queryAndSavePointsForAllUsers()\n\ndef main():\n addRecord({\n \"Platform\": 22,\n \"Appstore\": 13,\n \"Forum\": 10,\n \"Community\": 0,\n \"Learning\": 0\n },-300)\n addRecord({\n \"Platform\": 2022,\n \"Appstore\": 213,\n \"Forum\": 2010,\n \"Community\": 0,\n \"Learning\": 374\n },-30)\n addRecord({\n \"Platform\": 2122,\n \"Appstore\": 313,\n \"Forum\": 2110,\n \"Community\": 0,\n \"Learning\": 374\n },-10)\n addRecord({\n \"Platform\": 2522,\n \"Appstore\": 313,\n \"Forum\": 2210,\n \"Community\": 0,\n \"Learning\": 374\n },-7)\n addRecord({\n \"Platform\": 2522,\n \"Appstore\": 313,\n \"Forum\": 2230,\n \"Community\": 0,\n \"Learning\": 374\n },-5)\n addRecord({\n \"Platform\": 2528,\n \"Appstore\": 318,\n \"Forum\": 2232,\n \"Community\": 0,\n \"Learning\": 374\n },-4)\n addRecord({\n \"Platform\": 2530,\n \"Appstore\": 320,\n \"Forum\": 2232,\n \"Community\": 0,\n \"Learning\": 375\n },-3)\n addRecord({\n \"Platform\": 2540,\n \"Appstore\": 322,\n \"Forum\": 2272,\n \"Community\": 0,\n \"Learning\": 375\n },-1)\n addRecord({\n \"Platform\": 2542,\n \"Appstore\": 325,\n \"Forum\": 2276,\n \"Community\": 2,\n \"Learning\": 376\n },0)\n\nif __name__ == \"__main__\":\n main()\n" }, { "alpha_fraction": 0.7665639519691467, "alphanum_fraction": 0.7711864113807678, "avg_line_length": 27.844444274902344, "blob_id": "3671c688595b1954e6bf2b2e214679ecda7d3660", "content_id": "68672c940fd3d9d788b4a983d1ee8759eb2287cd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1298, "license_type": "no_license", "max_line_length": 101, "num_lines": 45, "path": "/src/dates.py", "repo_name": "gajduk/MendixPointsReport", "src_encoding": "UTF-8", "text": "import datetime\n\ndate_formatt = \"%Y-%m-%d\"\ndatetime_formatt = \"%Y-%m-%d %H:%M:%S\"\nbegginingOfTime = datetime.datetime(1970,1,1)\n\nclass Dates(object):\n\n\tdef getCurrentDay(self):\n\t\treturn (datetime.datetime.utcnow() - begginingOfTime).days\n\n\tdef getDayForDate(self,date):\n\t\treturn (date - begginingOfTime).days\n\n\tdef getDateForDay(self,day):\n\t\treturn begginingOfTime+datetime.timedelta(days=day)\n\n\tdef getDatestampForDate(self,date):\n\t\treturn date.strftime(date_formatt)\n\n\tdef getTimestampForDate(self,date):\n\t\treturn date.strftime(datetime_formatt)\n\n\tdef getTimestamp(self):\n\t\treturn self.getTimestampForDate(datetime.datetime.utcnow())\n\nclass DatesWithOffset(Dates):\n\n\tdef __init__(self,offset):\n\t\tself._offset = offset\n\n\tdef getCurrentDay(self):\n\t\treturn super(DatesWithOffset, self).getCurrentDay()+self._offset\n\n\tdef getDayForDate(self,date):\n\t\treturn super(DatesWithOffset, self).getDayForDate(date)+self._offset\n\n\tdef getDateForDay(self,day):\n\t\treturn super(DatesWithOffset, self).getDateForDay(day+self._offset)\n\n\tdef getTimestampForDate(self,date):\n\t\treturn super(DatesWithOffset, 
self).getTimestampForDate(date+datetime.timedelta(days=self._offset))\n\n\tdef getDatestampForDate(self,date):\n\t\treturn super(DatesWithOffset, self).getDatestampForDate(date+datetime.timedelta(days=self._offset))\n" }, { "alpha_fraction": 0.6315789222717285, "alphanum_fraction": 0.6343784928321838, "avg_line_length": 37.826087951660156, "blob_id": "b898ec40295e5f442a684f0bba286b2b5217203e", "content_id": "7c3f2d50d3f8fc70e439cb16b82ef10607c871f9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1786, "license_type": "no_license", "max_line_length": 134, "num_lines": 46, "path": "/src/report_generator.py", "repo_name": "gajduk/MendixPointsReport", "src_encoding": "UTF-8", "text": "from database_interface import MongoDBInterface\nfrom users_list import UsersListDB\nfrom datetime import datetime,timedelta\ndate_formatt_weekly_report = \"%a %d.%m\"\nclass ReportGenerator:\n\n def __init__(self,db,users_list):\n self._db = db\n self._users_list = users_list\n\n def getLastWeekReport(self):\n last_weekpoints = self._getLastWeekPoints()\n return self._formatLastWeekPointsAaHTMLTable(last_weekpoints)\n\n def _formatLastWeekPointsAaHTMLTable(self,points):\n header = \"<th>Category\\Day</th>\"\n header += \"\".join([\"<th>\"+e.strftime(date_formatt_weekly_report)+\"</th>\" for e in points[\"Dates\"]])\n header = \"<tr>\"+header+\"</tr>\"\n rows = {c:\"\".join(['<td align=\"right\">'+str(round(val, 1))+\"</td>\" for val in points[\"Points\"][c]]) for c in points[\"Points\"]}\n body = \"\".join([\"<tr><td>\"+c+\"</td>\"+rows[c]+\"</tr>\" for c in rows])\n return '<table border=\"1\" cellspacing=\"0\">'+header+body+\"</table>\"\n\n def _getLastWeekPoints(self):\n start_date,end_date = DatesHelper().getLastWeekPeriod()\n lastWeekPoints = self._db.queryPointsForUserForAPeriod(\"Andrej Gajduk\",start_date,end_date)\n categories = sorted([c for c in lastWeekPoints[0][\"DeltaPoints\"]])\n dates = sorted([points[\"Date\"] for points in lastWeekPoints])\n res = {\"Dates\":dates,\"Points\":{}}\n for c in categories:\n res[\"Points\"][c] = [points[\"DeltaPoints\"][c] for points in lastWeekPoints]\n return res\n\n\nclass DatesHelper:\n\n def getLastWeekPeriod(self):\n now = datetime.utcnow()\n return now-timedelta(days=7),now\n\ndef main():\n db = MongoDBInterface()\n users_list = UsersListDB()\n print ReportGenerator(db,users_list).getLastWeekReport()\n\nif __name__ == \"__main__\":\n main()\n" }, { "alpha_fraction": 0.6594460010528564, "alphanum_fraction": 0.6633522510528564, "avg_line_length": 28.030927658081055, "blob_id": "1b9258f0fd6287484ca8d76290ed9a1296fbdfa5", "content_id": "5000dac516996a6f6f1a3bd127d7702e256e5139", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2816, "license_type": "no_license", "max_line_length": 115, "num_lines": 97, "path": "/src/database_interface.py", "repo_name": "gajduk/MendixPointsReport", "src_encoding": "UTF-8", "text": "import pymongo\nfrom urllib import quote_plus\nimport database_credentials\nfrom dates import Dates\n\nclass MongoDBInterface:\n\t#dates interfaces\n\t_dates = ''\n\t#database connector\n\t_db = ''\n\n\tdef __init__(self, dates=Dates(), useTestColl=True):\n\t\tusername = quote_plus(database_credentials.db_username)\n\t\tpassword = quote_plus(database_credentials.db_password)\n\t\tclient = pymongo.MongoClient('mongodb://%s:%[email protected]:42417/mendix-points-report' % (username, password))\n\t\tself._db = 
client[\"mendix-points-report\"]\n\t\tif useTestColl:\n\t\t\tself._points_coll = self._db[\"MendixPointsMock\"]\n\t\telse:\n\t\t\tself._points_coll = self._db[\"MendixPoints\"]\n\t\tself._dates = dates\n\n\tdef getDB(self):\n\t\treturn self._db\n\n\tdef savePoints(self,DisplayName,Points):\n\t\tcurrentDay = self._dates.getCurrentDay()\n\t\tpreviousPoints,deltaDays = self._getPreviousPointsAndDeltaDays(currentDay,DisplayName)\n\t\tdeltaPoints = {}\n\t\tif previousPoints:\n\t\t\tdeltaPoints = {e:(Points[e]-previousPoints[e])*1.0/deltaDays for e in Points }\n\t\t\tfor day in range(1,deltaDays):\n\t\t\t\tself._points_coll.find_one_and_update(\n\t\t\t\t\t{\n\t\t\t\t\t\t\"Name\": DisplayName,\n\t\t\t\t\t\t\"Day\": currentDay-day\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\t\"$set\": {\n\t\t\t\t\t\t\t\t\t\"DeltaPoints\": deltaPoints,\n\t\t\t\t\t\t\t\t\t\"Inferred\": True,\n\t\t\t\t\t\t\t\t\t\"Timestamp\": self._dates.getTimestampForDate(self._dates.getDateForDay(currentDay-day))\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t},\n\t\t\t\t\tupsert=True)\n\t\tself._points_coll.find_one_and_update(\n\t\t\t{\n\t\t\t\t\"Name\": DisplayName,\n\t\t\t\t\"Day\": currentDay\n\t\t\t},\n\t\t\t{\n\t\t\t\t\"$set\": {\n\t\t\t\t\t\t\t\"TotalPoints\": Points,\n\t\t\t\t\t\t\t\"DeltaPoints\": deltaPoints,\n\t\t\t\t\t\t\t\"Inferred\": deltaDays>1,\n\t\t\t\t\t\t\t\"Timestamp\": self._dates.getTimestamp()\n\t\t\t\t\t\t}\n\t\t\t},\n\t\t\tupsert=True)\n\n\tdef _getPreviousPointsAndDeltaDays(self,Day,DisplayName):\n\t\ttry:\n\t\t\tpreviousPointsRecord = self._points_coll.find({\n\t\t\t\t\"Name\" : DisplayName,\n\t\t\t\t\"Day\": {\"$lt\": Day }\n\t\t\t}).sort([(\"Day\",-1)]).limit(1).next()\n\t\t\treturn previousPointsRecord[\"TotalPoints\"],Day-previousPointsRecord[\"Day\"]\n\t\texcept:\n\t\t\treturn None,None\n\n\tdef queryPointsForUserForSingleDate(self,DisplayName,Date):\n\t\tqueryDay = self._dates.getDayForDate(Date)\n\t\tres = self._points_coll.find_one({\n\t\t\t\t\"Name\" : DisplayName,\n\t\t\t\t\"Day\": queryDay\n\t\t\t})\n\t\tif res:\n\t\t\treturn {\"DeltaPoints\":res[\"DeltaPoints\"],\n\t\t\t\t\t\"Inferred\":res[\"Inferred\"],\n\t\t\t\t\t\"Date\":self._dates.getDateForDay(res[\"Day\"])}\n\t\telse:\n\t\t\treturn None\n\n\tdef queryPointsForUserForAPeriod(self,DisplayName,PeriodStart,PeriodEnd):\n\t\tstartDay = self._dates.getDayForDate(PeriodStart)\n\t\tendDay = self._dates.getDayForDate(PeriodEnd)\n\t\tprint startDay,endDay\n\t\tres = self._points_coll.find({\n\t\t\t\t\"Name\" : DisplayName,\n\t\t\t\t\"Day\": { \"$gte\" : startDay, \"$lt\" : endDay }\n\t\t\t})\n\t\tif res:\n\t\t\treturn [{\"DeltaPoints\":e[\"DeltaPoints\"],\n\t\t\t\t\t\"Inferred\":e[\"Inferred\"],\n\t\t\t\t\t\"Date\":self._dates.getDateForDay(e[\"Day\"])} for e in res]\n\t\telse:\n\t\t\treturn []\n" }, { "alpha_fraction": 0.7397959232330322, "alphanum_fraction": 0.7397959232330322, "avg_line_length": 21.615385055541992, "blob_id": "4449a31a971fba9fe7ac59fcf9720d101fd94412", "content_id": "b5d1b0b91476676186ceba4839a533d01c63d2fb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 588, "license_type": "no_license", "max_line_length": 66, "num_lines": 26, "path": "/src/phantomjs/query_points_with_phantomjs.py", "repo_name": "gajduk/MendixPointsReport", "src_encoding": "UTF-8", "text": "import time\nfrom subprocess import check_output\nfrom os import listdir\nfrom os.path import isfile, join\nimport json\n\nclass PhantomJSQuery:\n\t_phantom_js = join(\"phantomjs\",\"phantom_getuserstats.js\")\n\n\tdef 
getCurrentPointsForUser(self,DisplayName):\n\t\tres = check_output([\"phantomjs\", self._phantom_js, DisplayName])\n\t\treturn json.loads(res)\n\nclass MockQuery:\n\n\tdef __init__(self,points):\n\t\tself._points = points\n\n\tdef getCurrentPointsForUser(self,DisplayName):\n\t\treturn self._points\n\ndef main():\n\tprint PhantomJSQuery().getCurrentPointsForUser(\"Andrej Gajduk\")\n\nif __name__ == \"__main__\":\n\tmain()\n" }, { "alpha_fraction": 0.7186357975006104, "alphanum_fraction": 0.7186357975006104, "avg_line_length": 25.483871459960938, "blob_id": "1b59a9fb184090b4c1ae22e1bbd2a3c39011cdd2", "content_id": "174b598c1c4979d6b64589fd1770f4d440d9fec2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 821, "license_type": "no_license", "max_line_length": 93, "num_lines": 31, "path": "/src/query_points.py", "repo_name": "gajduk/MendixPointsReport", "src_encoding": "UTF-8", "text": "from phantomjs.query_points_with_phantomjs import PhantomJSQuery\nfrom database_interface import MongoDBInterface\nfrom dates import Dates\nfrom users_list import UsersListDB\n\nclass QueryPoints:\n\t#database interface\n\t_db = ''\n\t#query interface\n\t_query = ''\n\t_users_list = ''\n\n\tdef __init__(self, db=MongoDBInterface(), query=PhantomJSQuery(), users_list=UsersListDB()):\n\t\tself._db = db\n\t\tself._query = query\n\t\tself._users_list = users_list\n\n\tdef queryAndSavePointsForAllUsers(self):\n\t\tusers = self._users_list.getUsers()\n\t\tres = []\n\t\tfor user in users:\n\t\t\tpoints = self._query.getCurrentPointsForUser(user[\"DisplayName\"])\n\t\t\tself._db.savePoints(user[\"RealName\"],points)\n\t\t\tres.append({\"user\":user,\"points\":points})\n\t\treturn res\n\ndef main():\n\tQueryPoints().queryAndSavePointsForAllUsers()\n\nif __name__ == \"__main__\":\n\tmain()\n" }, { "alpha_fraction": 0.6303257942199707, "alphanum_fraction": 0.6303257942199707, "avg_line_length": 21.799999237060547, "blob_id": "dfe1b03ebd6a6a84b09fb59f8a6d32f0005d6a44", "content_id": "97d5f6a95a47a770bac1a7f32a58897c4f68ea64", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 798, "license_type": "no_license", "max_line_length": 69, "num_lines": 35, "path": "/src/users_list.py", "repo_name": "gajduk/MendixPointsReport", "src_encoding": "UTF-8", "text": "import json\nfrom database_interface import MongoDBInterface\n\nclass UsersListJsonFile:\n\n    def __init__(self,users_file=\"users.json\"):\n        self._users_file = users_file\n\n    def getUsers(self):\n        return json.load(open(self._users_file,\"r\"))\n\nclass UsersListDB:\n\n    _db = ''\n\n    def __init__(self,db=MongoDBInterface(),users_coll=\"Users\"):\n        self._db = db\n        self._users_coll = users_coll\n\n    def getUsers(self):\n        return [e for e in self._db.getDB()[self._users_coll].find()]\n\n\nclass AndrejGajdukUsersList:\n\n    def getUsers(self):\n        return [{\"RealName\":\"Andrej Gajduk\", \"DisplayName\":\"Andrej Gajduk\"}]\n\ndef main():\n    print UsersListJsonFile().getUsers()\n    print UsersListDB().getUsers()\n    print AndrejGajdukUsersList().getUsers()\n\nif __name__ == \"__main__\":\n    main()\n" }, { "alpha_fraction": 0.7247058749198914, "alphanum_fraction": 0.7247058749198914, "avg_line_length": 31.69230842590332, "blob_id": "af2e2c6dae031bf052ab8e2dc5bfd4d5a1bbc43b", "content_id": "12f742a0c4fd85f4d31290db434dab0c2e014b2b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 850, "license_type": "no_license", 
"max_line_length": 81, "num_lines": 26, "path": "/src/main.py", "repo_name": "gajduk/MendixPointsReport", "src_encoding": "UTF-8", "text": "from logger import log\nimport traceback\nfrom database_interface import MongoDBInterface\nfrom dates import Dates\nfrom users_list import UsersListDB\nfrom query_points import QueryPoints\nfrom phantomjs.query_points_with_phantomjs import PhantomJSQuery\n\ndef queryPointsAndSave(db,users_list):\n query_backend = PhantomJSQuery()\n query_executor = QueryPoints(db=db,query=query_backend,users_list=users_list)\n log(\"Started querying\")\n query_res = query_executor.queryAndSavePointsForAllUsers()\n log(\"Querying finished:\"+str(query_res))\n\ndef main():\n try:\n log(\"Started main\")\n db = MongoDBInterface(dates=Dates(), useTestColl=False)\n users_list = UsersListDB(db)\n queryPointsAndSave(db,users_list)\n except Exception as e:\n log(traceback.format_exc(),error=True)\n\nif __name__ == \"__main__\":\n main()\n" } ]
10
luisendres/python
https://github.com/luisendres/python
56041725efb01f036e49205f626f3d41c3acaef8
2c127b5086ae024f41fb2179fbb2b62291e4e344
537d67757a51e49339341ebde0ad55b3106bed14
refs/heads/main
2023-07-07T05:04:05.945464
2021-07-29T17:28:44
2021-07-29T17:28:44
383,918,138
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5767441987991333, "alphanum_fraction": 0.5837209224700928, "avg_line_length": 24.058822631835938, "blob_id": "1307ce1d3ea6f9080680d049f14bb5ea0499688a", "content_id": "6de124e1a3a1c8990d2b8b6ca4580bcd9d29765d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 430, "license_type": "no_license", "max_line_length": 54, "num_lines": 17, "path": "/flask/fundamentals/checkerboard/server.py", "repo_name": "luisendres/python", "src_encoding": "UTF-8", "text": "from flask import Flask, render_template\napp = Flask(__name__)\n\[email protected]('/')\ndef checkboard ():\n return render_template(\"index.html\", x = 8, y = 8)\n \[email protected]('/<int:x>')\ndef checkboard_x (x):\n return render_template(\"index.html\", x = x, y = 8)\n \[email protected]('/<int:x>/<int:y>')\ndef checkboard_x_y(x,y):\n return render_template(\"index.html\", x = x, y = y)\n\nif __name__ =='__main__':\n app.run(debug=True)\n " }, { "alpha_fraction": 0.4637419283390045, "alphanum_fraction": 0.4787096679210663, "avg_line_length": 31.571428298950195, "blob_id": "3fb64c3052fe38f8a36d36963fe81bdb4be18f74", "content_id": "74b6168ab12a3ff104a801845729d14399df534b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3875, "license_type": "no_license", "max_line_length": 191, "num_lines": 119, "path": "/fundamentals/fundamentals/functions_intermediate.py", "repo_name": "luisendres/python", "src_encoding": "UTF-8", "text": "# #1 ------------------------------------------------------------------------UPDATE VALUES IN DICTIONARIES AND LISTS------------------------------------------------------------------------\nx = [ [5,2,3], [10,8,9] ] \nstudents = [\n {'first_name': 'Michael', 'last_name' : 'Jordan'},\n {'first_name' : 'John', 'last_name' : 'Rosales'}\n]\nsports_directory = {\n 'basketball' : ['Kobe', 'Jordan', 'James', 'Curry'],\n 'soccer' : ['Messi', 'Ronaldo', 'Rooney']\n}\nz = [ {'x': 10, 'y': 20} ]\n\n#1.Change the value of 10 in x to 15. 
Once you're done, x should now be [[5,2,3],[15,8,9]].\nx[1][0] = 15\nprint(x)\n\n#2.Change the last_name of the first student from 'Jordan' to 'Bryant'\nstudents [0]['last_name'] = 'Bryant'\nprint(students[0]['last_name'])\n\n#3.In the sports_directory, change 'Messi' to 'Andres'\nsports_directory['soccer'][0] = \"Andres\"\nprint(sports_directory['soccer'][0])\n\n#4.Change the value 20 in z to 30\nz[0][\"y\"] = 30\nprint(z[0][\"y\"])\n\n#2 ------------------------------------------------------------------------ITERATE THROUGH A LIST OF DICTIONARIES------------------------------------------------------------------------\nstudents = [\n {'first_name': 'Michael', 'last_name' : 'Jordan'},\n {'first_name' : 'John', 'last_name' : 'Rosales'},\n {'first_name' : 'Mark', 'last_name' : 'Guillen'},\n {'first_name' : 'KB', 'last_name' : 'Tonel'}\n ]\n\ndef iterateDictionary(some_list):\n for i in range(0,len(some_list)):\n for key, val in some_list[i].items():\n print(key, \"-\",val)\n \niterateDictionary(students)\n\n# Another way to do it \n# def iterateDictionary(some_list):\n# for i in range(0,len(some_list)):\n# output = \"\"\n# for key,val in some_list[i].items():\n# output += f\" {key} - {val},\"\n# print(output)\n\n# iterateDictionary(students)\n\n\n# should output: (it's okay if each key-value pair ends up on 2 separate lines;\n# bonus to get them to appear exactly as below!)\n# first_name - Michael, last_name - Jordan\n# first_name - John, last_name - Rosales\n# first_name - Mark, last_name - Guillen\n# first_name - KB, last_name - Tonel\n\n#3 ------------------------------------------------------------------------GET VALUES FROM A LIST OF DICTIONARIES------------------------------------------------------------------------\ndef iterateDictionary2(key_name, some_list):\n for i in range(0,len(some_list)):\n print(some_list[i][key_name])\n\niterateDictionary2('first_name',students)\niterateDictionary2('last_name',students)\n\n# Another way to do it \n# def iterateDictionary2(key_name, some_list):\n# for key,val in some_list[i].items():\n# if key == key_name:\n# print(val)\n\n\n#4 ------------------------------------------------------------------------ITERATE THOUGH A DICTIONARY WITH LIST VALUES------------------------------------------------------------------------\ndojo = {\n 'locations': ['San Jose', 'Seattle', 'Dallas', 'Chicago', 'Tulsa', 'DC', 'Burbank'],\n 'instructors': ['Michael', 'Amy', 'Eduardo', 'Josh', 'Graham', 'Patrick', 'Minh', 'Devon']\n}\n\n# def printInfo(some_dict):\n# for i in some_dict:\n# print(i)\n# for j in range(0,len(some_dict[i])):\n# print(some_dict[i][j])\n \n# printInfo(dojo)\n# # output:\n# 7 LOCATIONS\n# San Jose\n# Seattle\n# Dallas\n# Chicago\n# Tulsa\n# DC\n# Burbank\n \n# 8 INSTRUCTORS\n# Michael\n# Amy\n# Eduardo\n# Josh\n# Graham\n# Patrick\n# Minh\n# Devon\n\n\n# Another way to do it\n# def printInfo(some_dict):\n# for key, val in some_dict.items():\n# print(\"--------------\")\n# print(f\"{len(val)} {key.upper()}\")\n# for i in range(0, len(val)):\n# print(val[i])\n \n# printInfo(dojo)" }, { "alpha_fraction": 0.6260387897491455, "alphanum_fraction": 0.6315789222717285, "avg_line_length": 24.821428298950195, "blob_id": "bcd7e901fe6db6ed4283772402f0b2a5e999c120", "content_id": "b3e923785ce8097e8ac65d9baa5355f1e542d5c8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 722, "license_type": "no_license", "max_line_length": 68, "num_lines": 28, "path": "/flask/fundamentals/counter/server.py", "repo_name": 
"luisendres/python", "src_encoding": "UTF-8", "text": "#pipenv install flask ..... to make pipfile and pipfile.lock\n#pipenv shell ..... to enter into shell\n#python server.py ..... start your server\n\nfrom flask import Flask, render_template, request, redirect, session\napp = Flask(__name__)\napp.secret_key = 'thisSecretKey_isJustforDemonstration'\n\[email protected]('/')\ndef counter():\n if 'i' not in session:\n session['i'] = 0\n session['i'] +=1\n return render_template('index.html', i = session['i'])\n\[email protected]('/restart')\ndef restart():\n session.clear()\n # session['i'] = 0\n return redirect('/')\n\[email protected]('/plus')\ndef plus():\n session['i'] += 1\n return redirect('/')\n#this must be below ALL ROUTES\nif __name__ =='__main__':\n app.run(debug=True)" }, { "alpha_fraction": 0.6318471431732178, "alphanum_fraction": 0.6318471431732178, "avg_line_length": 42.61111068725586, "blob_id": "d754ad2ae0ac081b7fd0deb6cf4662fdadf8a43b", "content_id": "92b6cadc2ccc146bf077b5a43201a2c7fd3c8473", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 785, "license_type": "no_license", "max_line_length": 137, "num_lines": 18, "path": "/flask_mysql/crud/dojos_ninjas_crud/flask_app/models/ninja.py", "repo_name": "luisendres/python", "src_encoding": "UTF-8", "text": "from flask_app.config.mysqlconnection import connectToMySQL\n# model the class after the users table from our database\nclass Ninja:\n def __init__(self, data):\n self.id = data['id']\n self.first_name = data['first_name']\n self.last_name = data['last_name']\n self.age = data['age']\n self.dojo_id = data['dojo_id']\n self.created_at = data['created_at']\n self.updated_at = data['updated_at']\n\n\n @classmethod\n def save_ninja(cls, data ):\n query = \"INSERT INTO ninja (first_name, last_name, age, dojo_id) VALUES ( %(first_name)s , %(last_name)s , %(age)s, %(dojo_id)s)\"\n # data is a dictionary that will be passed into the save method from server.py\n return connectToMySQL('dojos_and_ninjas').query_db(query,data)\n" }, { "alpha_fraction": 0.6526315808296204, "alphanum_fraction": 0.6526315808296204, "avg_line_length": 32, "blob_id": "41dbf92488cb4d5495f6ad1209b1db76de4a5adf", "content_id": "e3dc0c84671dff91637e7b230b370001c45f911a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 665, "license_type": "no_license", "max_line_length": 75, "num_lines": 20, "path": "/flask_mysql/crud/dojos_ninjas_crud/flask_app/controllers/ninjas.py", "repo_name": "luisendres/python", "src_encoding": "UTF-8", "text": "from flask import render_template, request, redirect\nfrom flask_app import app\nfrom flask_app.models.ninja import Ninja\nfrom flask_app.models.dojo import Dojo\n\[email protected]('/ninjas')\ndef create():\n return render_template(\"ninja.html\", all_dojos = Dojo.get_all_dojos())\n\[email protected]('/ninjas/create_ninja', methods=['POST'])\ndef create_ninja():\n data = {\n \"dojo_id\": request.form['dojo_id'],\n \"first_name\": request.form['first_name'],\n \"last_name\": request.form['last_name'],\n \"age\": request.form['age']\n }\n Ninja.save_ninja(data)\n redirect_here = \"/dojos/\" + request.form['dojo_id']\n return redirect(redirect_here)\n \n" }, { "alpha_fraction": 0.6606929302215576, "alphanum_fraction": 0.6606929302215576, "avg_line_length": 28.928571701049805, "blob_id": "140ced90e7ae2152f6fea7f90e29256664e9a526", "content_id": "8bf2769dabb8352a748fc905477b1b8fd31e5886", "detected_licenses": [], 
"is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 837, "license_type": "no_license", "max_line_length": 68, "num_lines": 28, "path": "/flask/fundamentals/dojo_survey/server.py", "repo_name": "luisendres/python", "src_encoding": "UTF-8", "text": "#pipenv install flask ..... to make pipfile and pipfile.lock\n#pipenv shell ..... to enter into shell\n#python server.py ..... start your server\n\nfrom flask import Flask, render_template, request, redirect, session\napp = Flask(__name__)\napp.secret_key = 'thisSecretKey_isJustforDemonstration'\n\[email protected]('/')\ndef index():\n return render_template('index.html')\n\[email protected]('/process', methods=['POST'])\ndef process():\n session ['name'] = request.form['name']\n session ['location'] = request.form['location']\n session ['language'] = request.form['language']\n session ['comment'] = request.form['comment']\n return redirect('/result')\n\[email protected]('/result')\ndef result():\n if 'name' not in session:\n return redirect ('/')\n return render_template('result.html')\n\nif __name__ =='__main__':\n app.run(debug=True)" }, { "alpha_fraction": 0.622963547706604, "alphanum_fraction": 0.622963547706604, "avg_line_length": 24.739999771118164, "blob_id": "2b4d0f99523872373e48f39dbe693b13f6eeab10", "content_id": "4b8d9d12c2d9868180d7346b1c556f9dd30722d5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1289, "license_type": "no_license", "max_line_length": 71, "num_lines": 50, "path": "/flask_mysql/crud/usersCR/flask_app/controllers/users.py", "repo_name": "luisendres/python", "src_encoding": "UTF-8", "text": "from flask import render_template, request, redirect\nfrom flask_app import app\nfrom flask_app.models.user import user\n\n\[email protected]('/users')\ndef read():\n return render_template(\"readAll.html\", all_users = user.get_all())\n\[email protected]('/users/create')\ndef create():\n return render_template(\"create.html\")\n\[email protected]('/users/create_user', methods=['POST'])\ndef create_user():\n data = {\n \"first_name\": request.form['first_name'],\n \"last_name\": request.form['last_name'],\n \"email\": request.form['email']\n }\n user.save(data)\n return redirect('/users')\n\[email protected]('/users/<int:id>')\ndef show(id):\n return render_template(\"readOne.html\", user_one = user.get_one(id))\n\[email protected]('/users/<int:id>/edit')\ndef edit(id):\n return render_template(\"edit.html\", user_one = user.get_one(id))\n\[email protected]('/users/<int:id>/edit_user', methods=['POST'])\ndef edit_user(id):\n data = {\n \"id\": id,\n \"first_name\": request.form['first_name'],\n \"last_name\": request.form['last_name'],\n \"email\": request.form['email']\n }\n user.update(data)\n return redirect('/users')\n\[email protected]('/delete/<int:id>')\ndef delete(id):\n user.delete_one(id)\n return redirect('/users') \n\[email protected]('/')\ndef new():\n return redirect('/users') " }, { "alpha_fraction": 0.596666693687439, "alphanum_fraction": 0.596666693687439, "avg_line_length": 22.153846740722656, "blob_id": "29d129dea92b1bb302d79207a2d55719dfaaa840", "content_id": "2018e6a61c8bb5f52d8b85a7d8adb837904e4d38", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 300, "license_type": "no_license", "max_line_length": 62, "num_lines": 13, "path": "/flask/fundamentals/playground/server.py", "repo_name": "luisendres/python", "src_encoding": "UTF-8", "text": "from flask import Flask, 
render_template\napp = Flask(__name__)\n\[email protected]('/play/<int:x>/<shade>')\ndef index (x, shade):\n return render_template(\"index.html\", x = x, shade = shade)\n \n# @app.route('/dojo')\n# def success():\n# return \"Dojo!\"\n\nif __name__ =='__main__':\n app.run(debug=True)" }, { "alpha_fraction": 0.6382352709770203, "alphanum_fraction": 0.6382352709770203, "avg_line_length": 22.413793563842773, "blob_id": "de6b56f1a00dfa478f4a4b4d5193b6190cf8774e", "content_id": "0ae64b1bba801d4158479a8e878144b0040ff896", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 680, "license_type": "no_license", "max_line_length": 75, "num_lines": 29, "path": "/flask_mysql/crud/dojos_ninjas_crud/flask_app/controllers/dojos.py", "repo_name": "luisendres/python", "src_encoding": "UTF-8", "text": "from flask import render_template, request, redirect\nfrom flask_app import app\nfrom flask_app.models.dojo import Dojo\n\[email protected]('/dojos')\ndef read():\n return render_template(\"index.html\", all_dojos = Dojo.get_all_dojos())\n\[email protected]('/dojos/create_dojo', methods= ['POST'])\ndef create_dojo():\n data = {\n \"name\": request.form['name']\n }\n Dojo.save_dojo(data)\n return redirect('/dojos')\n\[email protected]('/dojos/<dojo_id>')\ndef get_ninjas(dojo_id):\n data = {\n 'id': dojo_id\n }\n\n this_dojo = Dojo.get_ninjas_from_dojo(data)\n\n return render_template('dojos.html', this_dojo=this_dojo)\n\[email protected]('/')\ndef new():\n return redirect('/dojos') " }, { "alpha_fraction": 0.5628698468208313, "alphanum_fraction": 0.5850591659545898, "avg_line_length": 34.605262756347656, "blob_id": "4b446033ec007bb020a0b5cd3a5e7454c326dc65", "content_id": "4bdb4295489ecf7c4f75687295bbefc546aaffdb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1352, "license_type": "no_license", "max_line_length": 79, "num_lines": 38, "path": "/fundamentals/oop/dojo_pets/classes/pets.py", "repo_name": "luisendres/python", "src_encoding": "UTF-8", "text": "class Pet:\n# implement __init__( name , type , tricks ):\n def __init__(self, name, health = 100, energy = 100):\n self.name = name\n # self.type = type\n # self.tricks = tricks\n self.health = health\n self.energy = energy\n# implement the following methods:\n# sleep() - increases the pets energy by 25\n def sleep(self):\n self.energy += 25;\n print(f\"{self.name} has slept energy has increased to : {self.energy}\")\n return self\n# eat() - increases the pet's energy by 5 & health by 10\n def eat(self):\n self.energy += 5\n self.health += 10\n print(f\"{self.name} has eaten health has increased to : {self.health}\")\n print(f\"{self.name} has eaten energy has increased to : {self.energy}\")\n return self\n# play() - increases the pet's health by 5\n def play(self):\n self.health += 5\n print(f\"{self.name} played health has increased to : {self.health}\")\n return self\n# noise() - prints out the pet's sound\n def noise(self):\n print(f\"{self.name} makes sound\")\n return self\n\n# class Dog(Pet):\n# def __init__(self, name, health = 100, energy = 100):\n# super().__init__(name, health, energy)\n\n# class Cat(Pet):\n# def __init__(self, name, health = 100, energy = 100):\n# super().__init__(name, health, energy)" }, { "alpha_fraction": 0.6101545095443726, "alphanum_fraction": 0.61103755235672, "avg_line_length": 38.73684310913086, "blob_id": "3e7fe252ff3c00cf5caa93fb1dc2c55c6e38e185", "content_id": "5a192ecdc049a1e35a5f1005307d30f10aa9045c", 
"detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2265, "license_type": "no_license", "max_line_length": 143, "num_lines": 57, "path": "/flask_mysql/crud/usersCR/flask_app/models/user.py", "repo_name": "luisendres/python", "src_encoding": "UTF-8", "text": "from flask_app.config.mysqlconnection import connectToMySQL\n# model the class after the users table from our database\nclass user:\n def __init__(self, data):\n self.id = data['id']\n self.first_name = data['first_name']\n self.last_name = data['last_name']\n self.email = data['email']\n self.created_at = data['created_at']\n self.updated_at = data['updated_at']\n\n # Now we use class methods to query our database\n @classmethod\n def get_all(cls):\n query = \"SELECT * FROM user;\"\n # make sure to call the connectToMySQL function with the schema you are targeting.\n results = connectToMySQL('users_crud').query_db(query)\n # Create an empty list to append our instances of users\n users = []\n # Iterate over the db results and create instances of users with cls.\n for user in results:\n users.append( cls(user) )\n return users\n\n @classmethod\n def get_one(cls, id):\n query = \"SELECT * FROM user WHERE id = %(id)s;\"\n data = {\n \"id\": id\n }\n # make sure to call the connectToMySQL function with the schema you are targeting.\n result = connectToMySQL('users_crud').query_db(query, data)\n if len (result) > 0 :\n return result[0]\n else:\n return False\n\n @classmethod\n def delete_one(cls, id):\n query = \"DELETE FROM user WHERE id = %(id)s;\"\n data = {\n 'id': id\n }\n return connectToMySQL('users_crud').query_db(query, data)\n\n @classmethod\n def update(cls, data):\n query = \"UPDATE user SET first_name =%(first_name)s, last_name= %(last_name)s, email= %(email)s ,updated_at = NOW() WHERE id = %(id)s;\"\n # make sure to call the connectToMySQL function with the schema you are targeting.\n return connectToMySQL('users_crud').query_db(query, data)\n\n # class method to save our user to the database\n @classmethod\n def save(cls, data ):\n query = \"INSERT INTO user (first_name, last_name, email) VALUES ( %(first_name)s , %(last_name)s , %(email)s)\"\n # data is a dictionary that will be passed into the save method from server.py\n return connectToMySQL('users_crud').query_db(query,data)\n" }, { "alpha_fraction": 0.6070529222488403, "alphanum_fraction": 0.6070529222488403, "avg_line_length": 35.1363639831543, "blob_id": "5f6b64ec86cb9f50857ab4f83cd97d4fdd0d6727", "content_id": "e79cae2afbfd7b3d9037afd4999841a540bb760b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 795, "license_type": "no_license", "max_line_length": 89, "num_lines": 22, "path": "/fundamentals/oop/dojo_pets/classes/ninja.py", "repo_name": "luisendres/python", "src_encoding": "UTF-8", "text": "class Ninja:\n# new_ninja = Ninja(pet = Pet(\"Fluffy\"))\n# implement __init__( first_name , last_name , treats , pet_food , pet )\n def __init__(self, first_name, last_name, pet, treats = \"treats\", pet_food = \"food\"):\n self.first_name = first_name\n self.last_name = last_name\n self.treats = treats\n self.pet_food = pet_food\n self.pet = pet\n# implement the following methods:\n# walk() - walks the ninja's pet invoking the pet play() method\n def walk(self):\n self.pet.play()\n return self\n# feed() - feeds the ninja's pet invoking the pet eat() method\n def feed(self):\n self.pet.eat()\n return self\n# bathe() - cleans the ninja's pet invoking the pet 
noise() method\n def bath(self):\n self.pet.noise()\n return self" }, { "alpha_fraction": 0.6069868803024292, "alphanum_fraction": 0.663755476474762, "avg_line_length": 35.967742919921875, "blob_id": "6df5c300ed39cc4403b9aa9ee248e9028e241ab8", "content_id": "54bf17dd9ce4af288e7ef6594521c5f4879040f0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1145, "license_type": "no_license", "max_line_length": 132, "num_lines": 31, "path": "/fundamentals/oop/user.py", "repo_name": "luisendres/python", "src_encoding": "UTF-8", "text": "class User:\n def __init__(self, name, balance):\n self.name = name\n self.balance = balance\n def make_deposit(self, amount):\n self.balance += amount\n return self\n def make_withdraw(self, amount):\n self.balance -= amount\n return self\n def display_user_balance(self):\n print(f\"User: {self.name} \\nAccount Balance: ${self.balance}\")\n return self\n def transfer_money(self, other_user, amount):\n self.balance -= amount\n other_user.balance += amount\n print(f\"User: {self.name} \\nAccount Balance: ${self.balance}\")\n print(f\"User: {other_user.name} \\nAccount Balance: ${other_user.balance}\")\n return self\n\nluis = User(\"Luis\", 2000)\ngrace = User(\"Grace\", 100000)\nnina = User(\"Nina\", 50000)\n\nluis.make_deposit(1000).make_deposit(1000).make_deposit(2000).make_withdraw(3000).display_user_balance().transfer_money(grace, 1000)\n\ngrace.make_deposit(1000).make_deposit(1000).make_withdraw(100).make_withdraw(100).display_user_balance()\n\nnina.make_deposit(2000).make_withdraw(1000).make_withdraw(1000).display_user_balance()\n\nluis.transfer_money(nina, 1000)" }, { "alpha_fraction": 0.5887380838394165, "alphanum_fraction": 0.5897436141967773, "avg_line_length": 40.4375, "blob_id": "0e0cd8aed063a903e32f23d78ec182965cb6687e", "content_id": "b5b68381b8cbded8b4eebaa25315e293954bab1d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1989, "license_type": "no_license", "max_line_length": 103, "num_lines": 48, "path": "/flask_mysql/crud/dojos_ninjas_crud/flask_app/models/dojo.py", "repo_name": "luisendres/python", "src_encoding": "UTF-8", "text": "from flask_app.config.mysqlconnection import connectToMySQL\nfrom flask_app.models import ninja\n# model the class after the users table from our database\nclass Dojo:\n def __init__(self, data):\n self.id = data['id']\n self.name = data['name']\n self.created_at = data['created_at']\n self.updated_at = data['updated_at']\n\n self.ninjas = []\n\n @classmethod\n def get_ninjas_from_dojo(cls, data):\n query = \"SELECT * FROM dojo LEFT JOIN ninja ON ninja.dojo_id = dojo.id WHERE dojo.id = %(id)s;\"\n results = connectToMySQL('dojos_and_ninjas').query_db(query, data) \n dojo = cls(results[0])\n # dojo = Dojo(results[0]) ANOTHER WAY TO DO IT\n for row_from_db in results:\n ninja_data = {\n 'id': row_from_db['ninja.id'],\n 'first_name': row_from_db['first_name'],\n 'last_name': row_from_db['last_name'],\n 'age': row_from_db['age'],\n 'dojo_id': row_from_db['dojo_id'],\n 'created_at': row_from_db['ninja.created_at'],\n 'updated_at': row_from_db['ninja.updated_at']\n }\n dojo.ninjas.append(ninja.Ninja(ninja_data))\n return dojo\n\n @classmethod\n def get_all_dojos(cls):\n query = \"SELECT * FROM dojo;\"\n # make sure to call the connectToMySQL function with the schema you are targeting.\n results = connectToMySQL('dojos_and_ninjas').query_db(query)\n # Create an empty list to append our instances of users\n dojos = 
[]\n        # Iterate over the db results and create instances of users with cls.\n        for dojo in results:\n            dojos.append( cls(dojo) )\n        return dojos\n\n    @classmethod\n    def save_dojo(cls, data ):\n        query = \"INSERT INTO dojo (name) VALUES (%(name)s)\"\n        # data is a dictionary that will be passed into the save method from server.py\n        return connectToMySQL('dojos_and_ninjas').query_db(query,data)\n" }, { "alpha_fraction": 0.6000000238418579, "alphanum_fraction": 0.6071599125862122, "avg_line_length": 37.814815521240234, "blob_id": "7f8dc7e7fee6b85088b98a1e1f2944442c150f01", "content_id": "b2a03da6bbaf0f4ecd0bb4b377173c109aefd256", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2095, "license_type": "no_license", "max_line_length": 97, "num_lines": 54, "path": "/flask/fundamentals/dojo_gold/server.py", "repo_name": "luisendres/python", "src_encoding": "UTF-8", "text": "#pipenv install flask ..... to make pipfile and pipfile.lock\n#pipenv shell ..... to enter into shell\n#python server.py ..... start your server\nfrom typing import Counter\nfrom flask import Flask, render_template, request, redirect, session\nimport random\napp = Flask(__name__)\napp.secret_key = 'thisSecretKey_isJustforDemonstration'\n\n\[email protected]('/')\ndef index():\n    if 'gold' not in session:\n        session['gold'] = 0\n        # session['activity_list'] = []\n        session['activity'] = \"\"\n    return render_template(\"index.html\")\n\[email protected]('/process_money', methods=['POST'])\ndef process_money():\n    if request.form['building'] == 'Farm':\n        gold = random.randint(10,21)\n        session['gold'] += gold\n        # session['activity_list'].append(f\"Earned {gold} golds from the farm!\")\n        session['activity'] += (f\"\\nEarned {gold} golds from the Farm!\")\n    if request.form['building'] == 'Cave':\n        gold = random.randint(5,11)\n        session['gold'] += gold\n        # session['activity_list'].append(f\"Earned {gold} golds from the Cave!\")\n        session['activity'] += (f\"\\nEarned {gold} golds from the Cave!\")\n    if request.form['building'] == 'House':\n        gold = random.randint(2,6)\n        session['gold'] += gold\n        # session['activity_list'].append(f\"Earned {gold} golds from the House!\")\n        session['activity'] += (f\"\\nEarned {gold} golds from the House!\")\n    if request.form['building'] == 'Casino':\n        gold = random.randint(-50,50)\n        session['gold'] += gold\n        if gold > 0:\n            # session['activity_list'].append(f\"Earned {gold} golds from the Casino!\")\n            session['activity'] += (f\"\\nEarned {gold} golds from the Casino!\")\n        else:\n            # session['activity_list'].append(f\"Entered a casino and lost {-gold} golds... Ouch.\")\n            session['activity'] += (f\"\\nEntered a casino and lost {-gold} golds... 
Ouch.\")\n return redirect('/')\n\[email protected]('/restart')\ndef restart():\n session.clear()\n return redirect('/')\n\n#this must be below ALL ROUTES\nif __name__ =='__main__':\n app.run(debug=True)" }, { "alpha_fraction": 0.5874316692352295, "alphanum_fraction": 0.5949453711509705, "avg_line_length": 25.618181228637695, "blob_id": "88ecb469f7a403feb8113a91a2370fbc2f6dc083", "content_id": "345ca18acc584421f4cec5635ae2e8a23597db3b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1464, "license_type": "no_license", "max_line_length": 88, "num_lines": 55, "path": "/fundamentals/oop/user_with_bank_accounts.py", "repo_name": "luisendres/python", "src_encoding": "UTF-8", "text": "class User:\n def __init__(self, name, email):\n self.name = name\n self.email = email\n self.account = BankAccount(balance = 0)\n def make_deposit(self):\n self.account.deposit(100)\n return self\n def make_withdraw(self):\n self.account.withdraw(100)\n return self\n def display_user_balance(self):\n self.account.display_account_info()\n return self\n\nclass BankAccount:\n\n all_accounts = []\n def __init__(self, balance = 0):\n self.balance = balance\n BankAccount.all_accounts.append(self)\n\n def deposit(self, amount):\n self.balance += amount\n return self\n\n def withdraw(self, amount):\n if BankAccount.can_withdraw(self.balance,amount):\n self.balance -= amount\n else:\n print(\"Insufficient Funds: Charging a $5 fee.\")\n self.balance -= 5\n return self\n\n def display_account_info(self):\n print(f\"Account Balance: ${self.balance}\")\n return self\n\n @staticmethod\n def can_withdraw(balance,amount):\n if (balance - amount) < 0:\n return False\n else:\n return True\n\n @classmethod\n def all_info(cls):\n for BankAccount in cls.all_accounts:\n print(BankAccount.balance)\n\nluis = User(\"Luis\",\"luis@gmail\")\ngrace = User(\"Grace\", \"grace@gmail\")\n\nprint(luis.name)\nluis.make_deposit().make_deposit().make_deposit().make_withdraw().display_user_balance()\n" }, { "alpha_fraction": 0.6276422739028931, "alphanum_fraction": 0.6471544504165649, "avg_line_length": 23.639999389648438, "blob_id": "89778d88dcc7285c4e82333f93cd6e291e54c774", "content_id": "be2c88a4169cc2e24ee948cbbbea7c572efa06ad", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 615, "license_type": "no_license", "max_line_length": 57, "num_lines": 25, "path": "/fundamentals/oop/dojo_pets/dojo_pets.py", "repo_name": "luisendres/python", "src_encoding": "UTF-8", "text": "from classes.ninja import Ninja\nfrom classes.pets import Pet\n\nclass Dog(Pet):\n def __init__(self, name, health = 100, energy = 100):\n super().__init__(name, health, energy)\n\nclass Cat(Pet):\n def __init__(self, name, health = 100, energy = 100):\n super().__init__(name, health, energy)\n\nluis = Ninja(\"Luis\",\"Endres\",pet = Cat(\"Fluffy\"))\ngrace = Ninja(\"Grace\",\"Lucas\", pet = Dog(\"Brooklyn\"))\n\nprint(luis.pet.name)\nprint(luis.pet.health)\n\nprint(grace.pet.name)\nprint(grace.pet.health)\n\nluis.pet.sleep()\nluis.walk().feed().bath().pet.sleep()\n\ngrace.pet.sleep()\ngrace.walk().feed().bath().pet.sleep()" }, { "alpha_fraction": 0.4724583029747009, "alphanum_fraction": 0.47623544931411743, "avg_line_length": 39.74359130859375, "blob_id": "96d3e95315d52b6a3962b87c34266f1cf7d0b6d3", "content_id": "ab6786a9830aef94a08aa2374722a435f86c8564", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "HTML", "length_bytes": 3177, 
"license_type": "no_license", "max_line_length": 148, "num_lines": 78, "path": "/flask_mysql/validation/login_register/log_reg_app/templates/index.html", "repo_name": "luisendres/python", "src_encoding": "UTF-8", "text": "<!DOCTYPE html>\n<html lang=\"en\">\n<head>\n <meta charset=\"UTF-8\">\n <meta http-equiv=\"X-UA-Compatible\" content=\"IE=edge\">\n <meta name=\"viewport\" content=\"width=device-width, initial-scale=1.0\">\n <title>Document</title>\n <link href=\"https://cdn.jsdelivr.net/npm/[email protected]/dist/css/bootstrap.min.css\" rel=\"stylesheet\" integrity=\"sha384-EVSTQN3/azprG1Anm3QDgpJLIm9Nao0Yz1ztcQTwFspd3yD65VohhpuuCOmLASjC\" crossorigin=\"anonymous\">\n</head>\n<body>\n <div>\n <h1>Login and Registration</h1>\n <h2>Register</h2>\n <form action=\"/register\" method=\"POST\">\n <label for=\"first_name\">First Name:</label>\n <input type=\"text\" name=\"first_name\">\n {% with f_name_error = get_flashed_messages(category_filter=['first_name']) %}\n {% if f_name_error: %}\n {% for message in f_name_error: %}\n <p class=\"text-danger\">{{ message }}</p>\n {% endfor %}\n {% endif %}\n {% endwith %}\n <label for=\"last_name\">Last Name:</label>\n <input type=\"text\" name=\"last_name\">\n {% with l_name_error = get_flashed_messages(category_filter=['last_name']) %}\n {% if l_name_error: %}\n {% for message in l_name_error: %}\n <p class=\"text-danger\">{{ message }}</p>\n {% endfor %}\n {% endif %}\n {% endwith %}\n <label for=\"email\">Email:</label>\n <input type=\"text\" name=\"email\">\n {% with email_error = get_flashed_messages(category_filter=['email']) %}\n {% if email_error: %}\n {% for message in email_error: %}\n <p class=\"text-danger\">{{ message }}</p>\n {% endfor %}\n {% endif %}\n {% endwith %}\n <label for=\"password\">Password:</label>\n <input type=\"password\" name=\"password\">\n {% with password_error = get_flashed_messages(category_filter=['password']) %}\n {% if password_error: %}\n {% for message in password_error: %}\n <p class=\"text-danger\">{{ message }}</p>\n {% endfor %}\n {% endif %}\n {% endwith %}\n <label for=\"confirm_password\">Confirm Password:</label>\n <input type=\"password\" name=\"confirm_password\">\n <div>\n <input type=\"submit\" value= \"Register\">\n </div>\n </form>\n </div>\n <div>\n <h1>Login</h1>\n <form action=\"/login\" method=\"POST\">\n <label for=\"email\">Email:</label>\n <input type=\"text\" name=\"email\">\n {% with email_error = get_flashed_messages(category_filter=['login_email']) %}\n {% if email_error: %}\n {% for message in email_error: %}\n <p class=\"text-danger\">{{ message }}</p>\n {% endfor %}\n {% endif %}\n {% endwith %}\n <label for=\"password\">Password:</label>\n <input type=\"password\" name=\"password\">\n <div>\n <input type=\"submit\" value= \"Login\">\n </div>\n </form>\n </div>\n</body>\n</html>" } ]
18
SecretSanta007/MovieCatalog
https://github.com/SecretSanta007/MovieCatalog
b46b513130bca0f0b270cf7e3f373831a16ff10b
2f33e3057d7995a55d71f2b00a6bd09eec93bc56
9c8908b20fb51b27de966cae13403be4c4fe3e9b
refs/heads/master
2023-01-18T20:19:57.284409
2020-11-30T02:26:40
2020-11-30T02:26:40
295,223,031
0
0
MIT
2020-09-13T19:31:43
2020-09-13T19:31:45
2020-11-30T03:54:13
null
[ { "alpha_fraction": 0.5091102719306946, "alphanum_fraction": 0.5170008540153503, "avg_line_length": 37.89487838745117, "blob_id": "3f1143f9ec9dac986dcf4d9125cc6ca289d28632", "content_id": "c25a682268f640b279e4fdd0c9e0b987698ec22e", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 87327, "license_type": "permissive", "max_line_length": 289, "num_lines": 2245, "path": "/main.py", "repo_name": "SecretSanta007/MovieCatalog", "src_encoding": "UTF-8", "text": "\"\"\"\nneeds:\n\npip3 & python3\nalso: \nbeautifulsoup4 4.7.1 \n\n\npip3 install parse-torrent-name #(PTN)\npip3 install Jinja2 \npip3 install imdbpy\npip3 install google #(googlesearch)\npip3 install Pillow\n\nnot to forget that python is buggy without setting locale:\nnano ~/.bash_profile (new file) add:\n\texport LANG=en_US.UTF-8\n\texport LC_ALL=en_US.UTF-8\nsave!\n\npip3 install requests\npipenv install parse-torrent-name --pre --skip-lock\npipenv install requests --pre --skip-lock\npipenv install jinja2 --pre --skip-lock\npipenv install google --pre --skip-lock\npipenv install imdbpy --pre --skip-lock\npipenv install Pillow --pre --skip-lock\n\n\nupdating imdbpy:\npipenv run pip3 install git+https://github.com/alberanid/imdbpy\n\"\"\"\n\nimport io\nimport pickle\nfrom random import *\nimport sys\nimport getopt\nimport pprint\nimport os\nimport string\nimport PTN\nimport re\nimport requests\nimport bs4\nimport json\nfrom jinja2 import Environment, FileSystemLoader\nimport shutil\nimport datetime\nimport time\nfrom urllib.parse import urlparse, parse_qs, unquote, quote_plus\nimport urllib.error\nimport googlesearch\nfrom imdb import IMDb\nfrom imdb.Person import Person\nimport imdb\nimport imdb.utils\nimport copy\nimport base64\nimport collections\nfrom PIL import Image\n\n\ndef CountryNameByCode(name):\n # countries list from stackoverflow: https://stackoverflow.com/questions/16253060/how-to-convert-country-names-to-iso-3166-1-alpha-2-values-using-python\n countries = {\n \"AF\": \"Afghanistan\",\n \"AL\": \"Albania\",\n \"DZ\": \"Algeria\",\n \"AS\": \"American Samoa\",\n \"AD\": \"Andorra\",\n \"AO\": \"Angola\",\n \"AI\": \"Anguilla\",\n \"AQ\": \"Antarctica\",\n \"AG\": \"Antigua and Barbuda\",\n \"AR\": \"Argentina\",\n \"AM\": \"Armenia\",\n \"AW\": \"Aruba\",\n \"AU\": \"Australia\",\n \"AT\": \"Austria\",\n \"AZ\": \"Azerbaijan\",\n \"BS\": \"Bahamas\",\n \"BH\": \"Bahrain\",\n \"BD\": \"Bangladesh\",\n \"BB\": \"Barbados\",\n \"BY\": \"Belarus\",\n \"BE\": \"Belgium\",\n \"BZ\": \"Belize\",\n \"BJ\": \"Benin\",\n \"BM\": \"Bermuda\",\n \"BT\": \"Bhutan\",\n \"BO\": \"Bolivia, Plurinational State of\",\n \"BQ\": \"Bonaire, Sint Eustatius and Saba\",\n \"BA\": \"Bosnia and Herzegovina\",\n \"BW\": \"Botswana\",\n \"BV\": \"Bouvet Island\",\n \"BR\": \"Brazil\",\n \"IO\": \"British Indian Ocean Territory\",\n \"BN\": \"Brunei Darussalam\",\n \"BG\": \"Bulgaria\",\n \"BF\": \"Burkina Faso\",\n \"BI\": \"Burundi\",\n \"KH\": \"Cambodia\",\n \"CM\": \"Cameroon\",\n \"CA\": \"Canada\",\n \"CV\": \"Cape Verde\",\n \"KY\": \"Cayman Islands\",\n \"CF\": \"Central African Republic\",\n \"TD\": \"Chad\",\n \"CL\": \"Chile\",\n \"CN\": \"China\",\n \"CX\": \"Christmas Island\",\n \"CC\": \"Cocos (Keeling) Islands\",\n \"CO\": \"Colombia\",\n \"KM\": \"Comoros\",\n \"CG\": \"Congo\",\n \"CD\": \"Congo, the Democratic Republic of the\",\n \"CK\": \"Cook Islands\",\n \"CR\": \"Costa Rica\",\n \"Code\": \"Country name\",\n \"HR\": \"Croatia\",\n \"CU\": \"Cuba\",\n \"CW\": 
\"Curaçao\",\n \"CY\": \"Cyprus\",\n \"CZ\": \"Czech Republic\",\n \"Côte d'Ivoire\": \"CI\",\n \"DK\": \"Denmark\",\n \"DJ\": \"Djibouti\",\n \"DM\": \"Dominica\",\n \"DO\": \"Dominican Republic\",\n \"EC\": \"Ecuador\",\n \"EN\": \"England\",\n \"EL\": \"Greece\",\n \"EG\": \"Egypt\",\n \"SV\": \"El Salvador\",\n \"GQ\": \"Equatorial Guinea\",\n \"ER\": \"Eritrea\",\n \"EE\": \"Estonia\",\n \"ET\": \"Ethiopia\",\n \"FK\": \"Falkland Islands (Malvinas)\",\n \"FO\": \"Faroe Islands\",\n \"FJ\": \"Fiji\",\n \"FI\": \"Finland\",\n \"FR\": \"France\",\n \"GF\": \"French Guiana\",\n \"PF\": \"French Polynesia\",\n \"TF\": \"French Southern Territories\",\n \"GA\": \"Gabon\",\n \"GM\": \"Gambia\",\n \"GE\": \"Georgia\",\n \"DE\": \"Germany\",\n \"GH\": \"Ghana\",\n \"GI\": \"Gibraltar\",\n \"GR\": \"Greece\",\n \"GL\": \"Greenland\",\n \"GD\": \"Grenada\",\n \"GP\": \"Guadeloupe\",\n \"GU\": \"Guam\",\n \"GT\": \"Guatemala\",\n \"GG\": \"Guernsey\",\n \"GN\": \"Guinea\",\n \"GW\": \"Guinea-Bissau\",\n \"GY\": \"Guyana\",\n \"HT\": \"Haiti\",\n \"HM\": \"Heard Island and McDonald Islands\",\n \"VA\": \"Holy See (Vatican City State)\",\n \"HN\": \"Honduras\",\n \"HK\": \"Hong Kong\",\n \"HU\": \"Hungary\",\n \"uk\": \"United Kingdom\",\n \"IS\": \"Iceland\",\n \"IN\": \"India\",\n \"ID\": \"Indonesia\",\n \"IR\": \"Iran, Islamic Republic of\",\n \"IQ\": \"Iraq\",\n \"IE\": \"Ireland\",\n \"IM\": \"Isle of Man\",\n \"IL\": \"Israel\",\n \"IT\": \"Italy\",\n \"JM\": \"Jamaica\",\n \"JP\": \"Japan\",\n \"JE\": \"Jersey\",\n \"JO\": \"Jordan\",\n \"KZ\": \"Kazakhstan\",\n \"KE\": \"Kenya\",\n \"KI\": \"Kiribati\",\n \"Korea, Democratic People's Republic of\": \"KP\",\n \"KR\": \"Korea, Republic of\",\n \"KW\": \"Kuwait\",\n \"KG\": \"Kyrgyzstan\",\n \"Lao People's Democratic Republic\": \"LA\",\n \"LV\": \"Latvia\",\n \"LB\": \"Lebanon\",\n \"LS\": \"Lesotho\",\n \"LR\": \"Liberia\",\n \"LY\": \"Libya\",\n \"LI\": \"Liechtenstein\",\n \"LT\": \"Lithuania\",\n \"LU\": \"Luxembourg\",\n \"MO\": \"Macao\",\n \"MK\": \"Macedonia, the former Yugoslav Republic of\",\n \"MG\": \"Madagascar\",\n \"MW\": \"Malawi\",\n \"MY\": \"Malaysia\",\n \"MV\": \"Maldives\",\n \"ML\": \"Mali\",\n \"MT\": \"Malta\",\n \"MH\": \"Marshall Islands\",\n \"MQ\": \"Martinique\",\n \"MR\": \"Mauritania\",\n \"MU\": \"Mauritius\",\n \"YT\": \"Mayotte\",\n \"MX\": \"Mexico\",\n \"FM\": \"Micronesia, Federated States of\",\n \"MD\": \"Moldova, Republic of\",\n \"MC\": \"Monaco\",\n \"MN\": \"Mongolia\",\n \"ME\": \"Montenegro\",\n \"MS\": \"Montserrat\",\n \"MA\": \"Morocco\",\n \"MZ\": \"Mozambique\",\n \"MM\": \"Myanmar\",\n \"NA\": \"Namibia\",\n \"NR\": \"Nauru\",\n \"NP\": \"Nepal\",\n \"NL\": \"Netherlands\",\n \"NC\": \"New Caledonia\",\n \"NZ\": \"New Zealand\",\n \"NI\": \"Nicaragua\",\n \"NE\": \"Niger\",\n \"NG\": \"Nigeria\",\n \"NU\": \"Niue\",\n \"NF\": \"Norfolk Island\",\n \"MP\": \"Northern Mariana Islands\",\n \"NO\": \"Norway\",\n \"OM\": \"Oman\",\n \"PK\": \"Pakistan\",\n \"PW\": \"Palau\",\n \"PS\": \"Palestine, State of\",\n \"PA\": \"Panama\",\n \"PG\": \"Papua New Guinea\",\n \"PY\": \"Paraguay\",\n \"PE\": \"Peru\",\n \"PH\": \"Philippines\",\n \"PN\": \"Pitcairn\",\n \"PL\": \"Poland\",\n \"PT\": \"Portugal\",\n \"PR\": \"Puerto Rico\",\n \"QA\": \"Qatar\",\n \"RO\": \"Romania\",\n \"RU\": \"Russian Federation\",\n \"RW\": \"Rwanda\",\n \"RE\": \"Réunion\",\n \"BL\": \"Saint Barthélemy\",\n \"SH\": \"Saint Helena, Ascension and Tristan da Cunha\",\n \"KN\": \"Saint Kitts and Nevis\",\n \"LC\": \"Saint 
Lucia\",\n \"MF\": \"Saint Martin (French part)\",\n \"PM\": \"Saint Pierre and Miquelon\",\n \"VC\": \"Saint Vincent and the Grenadines\",\n \"WS\": \"Samoa\",\n \"SM\": \"San Marino\",\n \"ST\": \"Sao Tome and Principe\",\n \"SA\": \"Saudi Arabia\",\n \"SN\": \"Senegal\",\n \"RS\": \"Serbia\",\n \"SC\": \"Seychelles\",\n \"SL\": \"Sierra Leone\",\n \"SG\": \"Singapore\",\n \"SX\": \"Sint Maarten (Dutch part)\",\n \"SK\": \"Slovakia\",\n \"SI\": \"Slovenia\",\n \"SB\": \"Solomon Islands\",\n \"SO\": \"Somalia\",\n \"ZA\": \"South Africa\",\n \"GS\": \"South Georgia and the South Sandwich Islands\",\n \"SS\": \"South Sudan\",\n \"ES\": \"Spain\",\n \"LK\": \"Sri Lanka\",\n \"SD\": \"Sudan\",\n \"SR\": \"Suriname\",\n \"SJ\": \"Svalbard and Jan Mayen\",\n \"SZ\": \"Swaziland\",\n \"SE\": \"Sweden\",\n \"CH\": \"Switzerland\",\n \"SY\": \"Syrian Arab Republic\",\n \"TW\": \"Taiwan, Province of China\",\n \"TJ\": \"Tajikistan\",\n \"TZ\": \"Tanzania, United Republic of\",\n \"TH\": \"Thailand\",\n \"TL\": \"Timor-Leste\",\n \"TG\": \"Togo\",\n \"TK\": \"Tokelau\",\n \"TO\": \"Tonga\",\n \"TT\": \"Trinidad and Tobago\",\n \"TN\": \"Tunisia\",\n \"TR\": \"Turkey\",\n \"TM\": \"Turkmenistan\",\n \"TC\": \"Turks and Caicos Islands\",\n \"TV\": \"Tuvalu\",\n \"UG\": \"Uganda\",\n \"UA\": \"Ukraine\",\n \"AE\": \"United Arab Emirates\",\n \"GB\": \"Great Britain\",\n \"US\": \"United States\",\n \"UM\": \"United States Minor Outlying Islands\",\n \"UY\": \"Uruguay\",\n \"UZ\": \"Uzbekistan\",\n \"VU\": \"Vanuatu\",\n \"VE\": \"Venezuela, Bolivarian Republic of\",\n \"VN\": \"Viet Nam\",\n \"VG\": \"Virgin Islands, British\",\n \"VI\": \"Virgin Islands, U.S.\",\n \"WF\": \"Wallis and Futuna\",\n \"EH\": \"Western Sahara\",\n \"YE\": \"Yemen\",\n \"ZM\": \"Zambia\",\n \"ZW\": \"Zimbabwe\",\n \"Åland Islands\": \"AX\",\n }\n if name.upper() in countries:\n return countries[name.upper()]\n else:\n return name\n\n\ndef LanguageNameByCode(name):\n # languages list from stackoverflow: https://gist.github.com/alexanderjulo/4073388\n languages = {\n \"aa\": \"Afar\",\n \"ab\": \"Abkhazian\",\n \"af\": \"Afrikaans\",\n \"ak\": \"Akan\",\n \"sq\": \"Albanian\",\n \"am\": \"Amharic\",\n \"ar\": \"Arabic\",\n \"an\": \"Aragonese\",\n \"hy\": \"Armenian\",\n \"as\": \"Assamese\",\n \"av\": \"Avaric\",\n \"ae\": \"Avestan\",\n \"ay\": \"Aymara\",\n \"az\": \"Azerbaijani\",\n \"ba\": \"Bashkir\",\n \"bm\": \"Bambara\",\n \"eu\": \"Basque\",\n \"be\": \"Belarusian\",\n \"bn\": \"Bengali\",\n \"bh\": \"Bihari languages\",\n \"bi\": \"Bislama\",\n \"bo\": \"Tibetan\",\n \"bs\": \"Bosnian\",\n \"br\": \"Breton\",\n \"bg\": \"Bulgarian\",\n \"my\": \"Burmese\",\n \"ca\": \"Catalan; Valencian\",\n \"cs\": \"Czech\",\n \"ch\": \"Chamorro\",\n \"ce\": \"Chechen\",\n \"zh\": \"Chinese\",\n \"cu\": \"Church Slavic; Old Slavonic; Church Slavonic; Old Bulgarian; Old Church Slavonic\",\n \"cv\": \"Chuvash\",\n \"kw\": \"Cornish\",\n \"co\": \"Corsican\",\n \"cr\": \"Cree\",\n \"cy\": \"Welsh\",\n \"cs\": \"Czech\",\n \"da\": \"Danish\",\n \"de\": \"German\",\n \"dv\": \"Divehi; Dhivehi; Maldivian\",\n \"nl\": \"Dutch; Flemish\",\n \"dz\": \"Dzongkha\",\n \"el\": \"Greek, Modern (1453-)\",\n \"en\": \"English\",\n \"eo\": \"Esperanto\",\n \"et\": \"Estonian\",\n \"eu\": \"Basque\",\n \"ee\": \"Ewe\",\n \"fo\": \"Faroese\",\n \"fa\": \"Persian\",\n \"fj\": \"Fijian\",\n \"fi\": \"Finnish\",\n \"fr\": \"French\",\n \"fr\": \"French\",\n \"fy\": \"Western Frisian\",\n \"ff\": \"Fulah\",\n \"Ga\": \"Georgian\",\n \"de\": 
\"German\",\n \"gd\": \"Gaelic; Scottish Gaelic\",\n \"ga\": \"Irish\",\n \"gl\": \"Galician\",\n \"gv\": \"Manx\",\n \"el\": \"Greek, Modern (1453-)\",\n \"gn\": \"Guarani\",\n \"gu\": \"Gujarati\",\n \"ht\": \"Haitian; Haitian Creole\",\n \"ha\": \"Hausa\",\n \"he\": \"Hebrew\",\n \"hz\": \"Herero\",\n \"hi\": \"Hindi\",\n \"ho\": \"Hiri Motu\",\n \"hr\": \"Croatian\",\n \"hu\": \"Hungarian\",\n \"hy\": \"Armenian\",\n \"ig\": \"Igbo\",\n \"is\": \"Icelandic\",\n \"io\": \"Ido\",\n \"ii\": \"Sichuan Yi; Nuosu\",\n \"iu\": \"Inuktitut\",\n \"ie\": \"Interlingue; Occidental\",\n \"ia\": \"Interlingua (International Auxiliary Language Association)\",\n \"id\": \"Indonesian\",\n \"ik\": \"Inupiaq\",\n \"is\": \"Icelandic\",\n \"it\": \"Italian\",\n \"jv\": \"Javanese\",\n \"ja\": \"Japanese\",\n \"kl\": \"Kalaallisut; Greenlandic\",\n \"kn\": \"Kannada\",\n \"ks\": \"Kashmiri\",\n \"ka\": \"Georgian\",\n \"kr\": \"Kanuri\",\n \"kk\": \"Kazakh\",\n \"km\": \"Central Khmer\",\n \"ki\": \"Kikuyu; Gikuyu\",\n \"rw\": \"Kinyarwanda\",\n \"ky\": \"Kirghiz; Kyrgyz\",\n \"kv\": \"Komi\",\n \"kg\": \"Kongo\",\n \"ko\": \"Korean\",\n \"kj\": \"Kuanyama; Kwanyama\",\n \"ku\": \"Kurdish\",\n \"lo\": \"Lao\",\n \"la\": \"Latin\",\n \"lv\": \"Latvian\",\n \"li\": \"Limburgan; Limburger; Limburgish\",\n \"ln\": \"Lingala\",\n \"lt\": \"Lithuanian\",\n \"lb\": \"Luxembourgish; Letzeburgesch\",\n \"lu\": \"Luba-Katanga\",\n \"lg\": \"Ganda\",\n \"mk\": \"Macedonian\",\n \"mh\": \"Marshallese\",\n \"ml\": \"Malayalam\",\n \"mi\": \"Maori\",\n \"mr\": \"Marathi\",\n \"ms\": \"Malay\",\n \"Mi\": \"Micmac\",\n \"mk\": \"Macedonian\",\n \"mg\": \"Malagasy\",\n \"mt\": \"Maltese\",\n \"mn\": \"Mongolian\",\n \"mi\": \"Maori\",\n \"ms\": \"Malay\",\n \"my\": \"Burmese\",\n \"na\": \"Nauru\",\n \"nv\": \"Navajo; Navaho\",\n \"nr\": \"Ndebele, South; South Ndebele\",\n \"nd\": \"Ndebele, North; North Ndebele\",\n \"ng\": \"Ndonga\",\n \"ne\": \"Nepali\",\n \"nl\": \"Dutch; Flemish\",\n \"nn\": \"Norwegian Nynorsk; Nynorsk, Norwegian\",\n \"nb\": \"Bokmål, Norwegian; Norwegian Bokmål\",\n \"no\": \"Norwegian\",\n \"oc\": \"Occitan (post 1500)\",\n \"oj\": \"Ojibwa\",\n \"or\": \"Oriya\",\n \"om\": \"Oromo\",\n \"os\": \"Ossetian; Ossetic\",\n \"pa\": \"Panjabi; Punjabi\",\n \"fa\": \"Persian\",\n \"pi\": \"Pali\",\n \"pl\": \"Polish\",\n \"pt\": \"Portuguese\",\n \"ps\": \"Pushto; Pashto\",\n \"qu\": \"Quechua\",\n \"rm\": \"Romansh\",\n \"ro\": \"Romanian; Moldavian; Moldovan\",\n \"ro\": \"Romanian; Moldavian; Moldovan\",\n \"rn\": \"Rundi\",\n \"ru\": \"Russian\",\n \"sg\": \"Sango\",\n \"sa\": \"Sanskrit\",\n \"si\": \"Sinhala; Sinhalese\",\n \"sk\": \"Slovak\",\n \"sk\": \"Slovak\",\n \"sl\": \"Slovenian\",\n \"se\": \"Northern Sami\",\n \"sm\": \"Samoan\",\n \"sn\": \"Shona\",\n \"sd\": \"Sindhi\",\n \"so\": \"Somali\",\n \"st\": \"Sotho, Southern\",\n \"es\": \"Spanish; Castilian\",\n \"sq\": \"Albanian\",\n \"sc\": \"Sardinian\",\n \"sr\": \"Serbian\",\n \"ss\": \"Swati\",\n \"su\": \"Sundanese\",\n \"sw\": \"Swahili\",\n \"sv\": \"Swedish\",\n \"ty\": \"Tahitian\",\n \"ta\": \"Tamil\",\n \"tt\": \"Tatar\",\n \"te\": \"Telugu\",\n \"tg\": \"Tajik\",\n \"tl\": \"Tagalog\",\n \"th\": \"Thai\",\n \"bo\": \"Tibetan\",\n \"ti\": \"Tigrinya\",\n \"to\": \"Tonga (Tonga Islands)\",\n \"tn\": \"Tswana\",\n \"ts\": \"Tsonga\",\n \"tk\": \"Turkmen\",\n \"tr\": \"Turkish\",\n \"tw\": \"Twi\",\n \"ug\": \"Uighur; Uyghur\",\n \"uk\": \"Ukrainian\",\n \"ur\": \"Urdu\",\n \"uz\": \"Uzbek\",\n \"ve\": \"Venda\",\n \"vi\": 
\"Vietnamese\",\n \"vo\": \"Volapük\",\n \"cy\": \"Welsh\",\n \"wa\": \"Walloon\",\n \"wo\": \"Wolof\",\n \"xh\": \"Xhosa\",\n \"yi\": \"Yiddish\",\n \"yo\": \"Yoruba\",\n \"za\": \"Zhuang; Chuang\",\n \"zh\": \"Chinese\",\n \"zu\": \"Zulu\",\n }\n if name.lower() in languages:\n return languages[name.lower()]\n else:\n return name\n\n\ndef selectiveMerge(a, b):\n # Intelligently merge TMDB(a) and IMDB data(b):\n # assumptions: IMDB data is more compete, and accurate. However some information can be found only on TMDB - eg. adult\n # take unique infor from TMDB\n # also for People in particular (directors etc), merge data:\n # if a person category is found only in TMDB (eg. show creators, art directors) then take the category as is\n # if it exists on both or only IMDB, take IMDB data and selectively for the people that exist on both sites (with identical name)\n # import missing info from TMDB (eg. portrait, character name, billing order etc)\n\n # - Top level items:\n c = {}\n for ak, av in a.items():\n if ak in b:\n # +if a.has_non_empty_item and b.has_non_empty_item, prefer +b & children\n if b.get(ak):\n c[ak] = b[ak]\n else:\n c[ak] = av\n if type(av) is list:\n # +if is person(director,writer etc)\n if ak in [\n \"CreatedBy\",\n \"Directors\",\n \"Writers\",\n \"Producers\",\n \"Cinematographers\",\n \"ArtDirectors\",\n \"Cast\",\n ]:\n # if a.child.name == b.child.name == non_empty then\n for a_index, person in enumerate(av):\n b_index = find(b[ak], \"Name\", person[\"Name\"])\n if b_index >= 0: # found inside b!\n # so there is something matching: get all elements from matchin b element, unless ones unique to a\n tmpItem = b[ak][b_index]\n # + take exta fields from a (TMDBID,PortraitURL,Order,Character)\n if av[a_index].get(\"TMDBID\"):\n tmpItem[\"TMDBID\"] = av[a_index].get(\"TMDBID\")\n if av[a_index].get(\"PortraitURL\"):\n if av[a_index][\"PortraitURL\"] is not None:\n tmpItem[\"PortraitURL\"] = av[a_index].get(\n \"PortraitURL\"\n )\n else:\n print(\n \"Bug, Shouldn't be none, should had already been filtered out\"\n )\n if \"Order\" in av[a_index]:\n tmpItem[\"Order\"] = av[a_index].get(\"Order\")\n if av[a_index].get(\"Character\"):\n tmpItem[\"Character\"] = av[a_index].get(\"Character\")\n c_index = find(c[ak], \"Name\", person[\"Name\"])\n c[ak][c_index] = tmpItem\n else: # not found inside b, unique in a - in that case, ignore completely, a (TMDB) is not the accurate one!\n #\n # !!!!!!!!!!!! this will have to change if at some point I get rid of IMDB.\n #\n pass\n else:\n # +if is language: do nothing (retain b)\n # +if is countries: do nothing (retain b)\n # +if is language: do nothing (retain b)\n # +if is keywords: do nothing (retain b)\n # + same for taglines - what else have i forgotten? hmm...\n pass\n else:\n # +unique, non-empty items from a, should be copied +a?\n c[ak] = av\n for bk, bv in b.items():\n # +unique, non-empty items from b, should be copied +b?\n if not bk in a:\n # 3. items unique to b? 
copy them to a!\n            c[bk] = bv\n    return c\n\n\ndef update(d, u):\n    #\n    # By Alex Martelli\n    # https://stackoverflow.com/questions/3232943/update-value-of-a-nested-dictionary-of-varying-depth\n    # (some people say there is some bug in it)\n    #\n    for k, v in u.items():\n        if isinstance(v, collections.abc.Mapping):\n            d[k] = update(d.get(k, {}), v)\n        else:\n            d[k] = v\n    return d\n\n\ndef find(lst, key, value):\n    # find index within list of dictionaries, by searching for a specific key's value\n    # https://stackoverflow.com/questions/4391697/find-the-index-of-a-dict-within-a-list-by-matching-the-dicts-value\n    for i, dic in enumerate(lst):\n        if dic[key] == value:\n            return i\n    return -1\n\n\ndef PreCleanUpFileName(Filename):\n    tmpRegex = re.compile(r\"\\+\")\n    Result = tmpRegex.sub(r\" \", Filename)\n    return Result\n\n\ndef PostCleanUpFileName(Filename):\n    tmpRegex = re.compile(r\"([sS]\\d+)|(\\.)|(\\[.*\\])\")\n    Result = tmpRegex.sub(r\" \", Filename)\n    return Result\n\n\ndef StripExtension(Filename):\n    if includeFoldersInValidFileTypes and Filename.endswith(os.sep):\n        Filename = os.path.normpath(Filename)\n        if not Filename.endswith(\".ffiles\"):\n            return Filename\n    # else it's either .ffile directory or real file with (assumed) extension\n    Result = os.path.splitext(Filename)[0]\n    return Result\n\n\ndef ExtractExtension(Filename):\n    # BUGGY 197239012\n    Result = os.path.splitext(Filename)[1]\n    return Result\n\n\ndef GetMovieDirectory(BaseDir, MovieName):\n    if MovieName != \"\":\n        NewDirectory = os.path.join(BaseDir, MovieName + \".ffiles\")\n        if os.path.exists(NewDirectory):\n            return NewDirectory\n    return None\n\n\ndef doesFileExist(BaseDir, MovieName, FileName):\n    if MovieName != \"\" and BaseDir != \"\" and FileName != \"\":\n        Directory = os.path.join(BaseDir, MovieName + \".ffiles\")\n        File = os.path.join(Directory, FileName)\n        if os.path.exists(File):\n            return True\n        else:\n            return False\n    else:\n        print(\n            \"Bug, testing for existence of file, without setting file, folder or moviename - why?\"\n        )\n        return False\n\n\ndef isValidMovieFile(BaseDir, MovieNameWithExtension):\n    FileURI = os.path.join(os.getcwd(), BaseDir, MovieNameWithExtension)\n    if os.path.isdir(FileURI):\n        if includeFoldersInValidFileTypes and not MovieNameWithExtension.endswith(\n            \".ffiles\"\n        ):\n            return True  # is folder, and not a cache folder. 
Assume it's a movie!\n else:\n return (\n False\n ) # is cache folder or we don't care about folders (in case where you have same cache and source folders)\n else:\n # StrippedMovieName = StripExtension(MovieNameWithExtension)\n StrippedExtension = ExtractExtension(MovieNameWithExtension)\n if StrippedExtension.lower() in ValidFileTypes:\n return True\n else:\n return False\n\n\ndef MakeDirectory(BaseDir, MovieName, dotExtension=\".ffiles\"):\n if BaseDir != \"\":\n NewDirectory = os.path.join(BaseDir, MovieName + dotExtension)\n if not os.path.exists(NewDirectory):\n os.makedirs(NewDirectory)\n return None\n\n\ndef ReplaceWSwithPlus(StringToReplace):\n # replace whitespace( ) with plus(+)\n tmpRegex = re.compile(r\"\\s\")\n Result = tmpRegex.sub(r\"\\+\", StringToReplace)\n return Result\n\n\ndef RemoveAuthorFromPlot(StringToReplace):\n # replace whitespace( ) with plus(+)\n Result = \"\"\n if StringToReplace:\n tmpRegex = re.compile(r\"(.*)::.*$\")\n Result = tmpRegex.sub(r\"\\1\", StringToReplace)\n return Result\n\n\ndef ConvertCountryCodeToName(Countries):\n if Countries:\n for index in range(len(Countries)):\n Country = Countries[index]\n if len(Country) == 2:\n Countries[index] = CountryNameByCode(Country)\n return Countries\n\n\ndef ConvertLanguageCodeToName(Languages):\n if Languages:\n for index in range(len(Languages)):\n Lang = Languages[index]\n if len(Lang) == 2:\n Languages[index] = LanguageNameByCode(Lang)\n return Languages\n\n\ndef isIMDBURL(URLString):\n # if this String an IMDB movie url?\n extractedID = ExtractIDfromIMDBURL(URLString)\n if extractedID > 0:\n return True\n else:\n return False\n\n\ndef ExtractIDfromIMDBURL(IMDBurl):\n # extract the numbered movie ID from the imdb url\n # https://www.imdb.com/title/tt6628102/\n tmpRegex = re.compile(r\"imdb\\.com.*\\/tt([0-9]*).*$\")\n extractedID = tmpRegex.search(IMDBurl)\n # print(\"url_raw:\" + str(IMDBurl))\n # print(\"movieID_raw:\" + str(extractedID))\n if extractedID == None:\n return -1\n else:\n # print(\"movieID:\" + str(extractedID.group(1)) )\n return int(extractedID.group(1))\n\n\ndef ParseDuckDuckGoSearchResults(PageText):\n # pprint.pprint(\"PAGE --------------------:\\n\" + PageText)\n tmpSoup = bs4.BeautifulSoup(PageText, features=\"html.parser\")\n tmpResults = tmpSoup.select(\"#links > div.result .result__a\")\n try:\n tmpFirstResult = tmpResults[0]\n except:\n print(\n \"bug, DuckDuckGo did not return results for query,\"\n \"page's HTML probably has changed, need to adjust\"\n )\n return \"Error\" # error\n link = tmpFirstResult.get(\"href\")\n print(link)\n url_obj = urlparse(link)\n parsed_url = parse_qs(url_obj.query).get(\"uddg\", \"\")\n if parsed_url:\n movieID = ExtractIDfromIMDBURL(unquote(parsed_url[0]))\n # print(unquote(parsed_url[0]))\n # pprint.pprint(link)\n return movieID\n else:\n return -1\n\n\ndef FetchGoogleSearchResults(MovieSearchString):\n MovieID = None\n try:\n for GGLurl in googlesearch.search(MovieSearchString, stop=20):\n if isIMDBURL(GGLurl):\n MovieID = ExtractIDfromIMDBURL(GGLurl)\n # print(\"ID is \"+str(MovieID)+\" (\"+GGLurl+\")\")\n return MovieID\n except urllib.error.URLError:\n print(\"Could not connect to Google - Perhaps you are offline?\")\n return -1 # -1: no ID found, perhaps not a movie?\n\n\ndef FetchDuckDuckGoSearchResults(MovieSearchString):\n ConvertedMovieName = ReplaceWSwithPlus(MovieSearchString)\n # print(\"encoded name:\"+quote_plus(MovieSearchString))\n DDGurl = (\n \"https://duckduckgo.com/html/?q=imdb+\"\n + quote_plus(MovieSearchString)\n + 
\"&sites=www.imdb.com\"\n + \"&kp=-2&norw=1\"\n )\n print(\"requesting url:\" + DDGurl)\n tmpreqresp = requests.get(DDGurl)\n # tmpreqresp.raise_for_status()\n if tmpreqresp.status_code != requests.codes.ok:\n print(\"duckduckgo did not return correctly\")\n MovieID = -1\n else:\n MovieID = ParseDuckDuckGoSearchResults(tmpreqresp.text)\n if MovieID > 0:\n # print(\"found\")\n return MovieID\n else:\n # print(\"Not found\")\n return -1\n\n\ndef GetMovieIDfromWeb(MovieName, Year=\"\", AdditionalInfo=\"\", Provider=\"google\"):\n # https://duckduckgo.com/?q=imdb+QUERY&kp=-2\n # kp=-2 safesearching OFF\n # provider service \"google\" or \"duckduckgo\"\n SearchQuery = MovieName + \" \" + str(Year) + \" \" + AdditionalInfo\n if Provider == \"google\":\n MovieID = FetchGoogleSearchResults(SearchQuery)\n # pprint.pprint(tmpreqresp)\n elif Provider == \"duckduckgo\":\n MovieID = FetchDuckDuckGoSearchResults(SearchQuery)\n if MovieID > 0:\n # print(\"found\")\n return MovieID\n else:\n # print(\"Not found\")\n return -1\n\n\ndef isMissingDataFile(BaseDir, MovieName):\n # if is a new Movie File or moviedata is missing, return True (for refetch etc)\n if MovieName != \"\":\n MovieDirectory = os.path.join(BaseDir, MovieName + \".ffiles\")\n MovieDataFile = os.path.join(MovieDirectory, \"moviedata.json\")\n if not os.path.exists(MovieDirectory):\n return True # new movie data is missing!\n elif os.path.exists(MovieDataFile):\n return False # Datafile exists\n else:\n return True # missing datafile but folder exists! possibly corrupt\n return None\n\n\ndef isMissingFolder(BaseDir, MovieName):\n if MovieName != \"\":\n MovieDirectory = os.path.join(BaseDir, MovieName + \".ffiles\")\n if not os.path.exists(MovieDirectory):\n return True # yes, it's missing the poster file!\n else:\n return False # no, the file exists\n\n\ndef isMissingPosterFile(BaseDir, MovieName):\n if MovieName != \"\":\n MovieDirectory = os.path.join(BaseDir, MovieName + \".ffiles\")\n MoviePosterFile = os.path.join(MovieDirectory, \"poster.jpg\")\n if not os.path.exists(MoviePosterFile):\n return True # yes, it's missing the poster file!\n else:\n return False # no, the file exists\n\n\ndef isMissingBackdropFile(BaseDir, MovieName):\n if MovieName != \"\":\n MovieDirectory = os.path.join(BaseDir, MovieName + \".ffiles\")\n MovieBackdropFile = os.path.join(MovieDirectory, \"backdrop.jpg\")\n if not os.path.exists(MovieBackdropFile):\n return True # yes, it's missing the backdrop file!\n else:\n return False # no, the file exists\n\n\ndef isFolderOrphan(SourceFilesList, CacheDir, StrippedMovieName):\n if StrippedMovieName != \"\" and len(SourceFilesList) > 0:\n # StrippedMovieName = StripExtension(MovieNameWithExtension)\n # MovieDirectory = os.path.join(CacheDir,StrippedMovieName+\".ffiles\")\n # MovieFile = os.path.join(SourceDir,StrippedMovieName)\n # MovieDataFile = os.path.join(MovieDirectory,\"moviedata.json\")\n GeneratedNameVariants = {StrippedMovieName + fn for fn in ValidFileTypes}\n if includeFoldersInValidFileTypes:\n GeneratedNameVariants.add(\n StrippedMovieName + os.sep\n ) # FUTURE192938: for folder testing, also create a variant with trailing /\n # assumes that if the file ends in / or \\ depending on the OS, it was a directory.\n # reason? 
because I embedded \"/\" at the end of folders so that I can tell them apart.\n        Common = GeneratedNameVariants.intersection(SourceFilesList)\n        if len(Common) == 0:\n            return True  # yes, it's an orphan!\n        else:\n            return False  # no, folder has paired Movie file - no orphan here\n\n\ndef DeleteFolder(BaseDir, MovieName):\n    if MovieName != \"\":\n        MovieDirectory = os.path.join(BaseDir, MovieName + \".ffiles\")\n        if os.path.exists(MovieDirectory):\n            shutil.rmtree(MovieDirectory)\n            # print(\"mock del: [\" + MovieDirectory+\"]\")\n            return None\n        else:\n            print(\"Bug! Deleting non-existent folder. Why?\")\n            return None\n\n\ndef BuildGenresList(MoviesCatalog):\n    tmpGenresList = []\n    for Movie in MoviesCatalog:\n        # print(Movie['DateAdded'])\n        if \"Genres\" in Movie:\n            tmpGenresList += Movie[\"Genres\"]\n    tmpGenresList = list(set(tmpGenresList))\n    tmpGenresList.sort()\n    return tmpGenresList\n\n\ndef BuildIndex(MoviesCatalog):\n    file_loader = FileSystemLoader(TemplatesRelativePath)\n    env = Environment(loader=file_loader)\n    GenresList = BuildGenresList(MoviesCatalog)\n    template = env.get_template(\"index.htmltemplate\")\n    output = template.render(\n        genres=GenresList,\n        foundmovies=True,\n        movies=MoviesCatalog,\n        CachePath=CacheRelativePath,\n        HowManyActorsShouldDisplay=constHowManyActorsShouldDisplay,\n    )\n    indexfile = open(\"index.html\", \"w\")\n    indexfile.write(output)\n    indexfile.close()\n    return None\n\n\ndef CreateMovieDataFile(BaseDir, MovieName, MovieData, ForceRebuild=False):\n    # create data file for movie\n    # print(MovieName)\n    # print(len(MovieData))\n    # print(BaseDir)\n    if MovieName != \"\":\n        MovieDirectory = os.path.join(BaseDir, MovieName + \".ffiles\")\n        MovieDataFile = os.path.join(MovieDirectory, \"moviedata.json\")\n        if not os.path.exists(MovieDirectory):\n            MakeDirectory(BaseDir, MovieName)\n        if os.path.exists(MovieDataFile):\n            if ForceRebuild == False:\n                # complain, why does overwriting happen?\n                print(\"bug: overwriting movie data, why?\")\n            os.remove(MovieDataFile)\n        # with open(MovieDataFile, 'w') as JSONFile:\n        # \tjson.dump(MovieData, JSONFile)\n        with io.open(MovieDataFile, \"w\", encoding=\"utf8\") as json_file:\n            json.dump(MovieData, json_file, ensure_ascii=False)\n        return True\n    return False\n\n\ndef GenerateGalleryThumbnail(\n    BaseDir,\n    MovieName,\n    input_file=\"poster.jpg\",\n    output_file=\"poster_thumbnail.jpg\",\n    basewidth=256,\n):\n    return GenerateResizedImage(BaseDir, MovieName, input_file, output_file, basewidth)\n\n\ndef GenerateResizedImage(BaseDir, MovieName, input_file, output_file, basewidth):\n    if input_file == None or output_file == None or basewidth < 1:\n        print(\"Bug, forgot to set filenames, and basewidth\")\n        return -1\n    if MovieName != \"\":\n        MovieDirectory = os.path.join(BaseDir, MovieName + \".ffiles\")\n        ImageInputFile = os.path.join(MovieDirectory, input_file)\n        if os.path.exists(ImageInputFile):\n            ImageOutputFile = os.path.join(MovieDirectory, output_file)\n            img = Image.open(ImageInputFile)\n            wpercent = basewidth / float(img.size[0])\n            hsize = int((float(img.size[1]) * float(wpercent)))\n            img = img.resize((basewidth, hsize), Image.LANCZOS)\n            img.save(ImageOutputFile)\n            return 1\n        else:\n            print(\"Bug, Generating Thumbnail for non-existent file?\")\n            return -1\n\n\ndef UpdateMovieDataFile(BaseDir, MovieName, MovieData):\n    if MovieName != \"\":\n        MovieDirectory = os.path.join(BaseDir, MovieName + \".ffiles\")\n        MovieDataFile = os.path.join(MovieDirectory, \"moviedata.json\")\n\n        if os.path.exists(MovieDataFile):\n            os.rename(MovieDataFile, 
MovieDataFile + \".bak\")\n with io.open(MovieDataFile, \"w\", encoding=\"utf8\") as json_file:\n json.dump(MovieData, json_file, ensure_ascii=False)\n else:\n print(\"bug: reading non-existent file, yet you asked for update! why?\")\n return None\n return None\n\n\ndef RestoreMovieDataFile(BaseDir, MovieName, ignore_not_found=False):\n # returns movie data object as read by the file\n if MovieName != \"\":\n MovieDirectory = os.path.join(BaseDir, MovieName + \".ffiles\")\n MovieDataFile = os.path.join(MovieDirectory, \"moviedata.json\")\n if os.path.exists(MovieDataFile):\n # with open(MovieDataFile) as JSONFile:\n # \tMovieData = json.load(JSONFile)#.decode('unicode-escape')\n with io.open(MovieDataFile, \"r\", encoding=\"utf8\") as json_file:\n MovieData = json.load(json_file)\n return MovieData\n else:\n if not ignore_not_found:\n print(\"bug: reading non-existent file and wasn't suppressed, why?\")\n return None\n return None\n\n\ndef GetHeadshotURL(personID, Condition=True, Provider=\"tmdb\"):\n if (\n Condition == False\n ): # should I fetch data? condition eg. if counter < 7, do! else --> stop fetching\n # print(\"reached condition - not displaying person \"+ personID)\n return None\n if Provider == \"imdb\":\n tmpPerson = ia.get_person(personID)\n elif Provider == \"tmdb\":\n # print(\"mock fetch\")\n # print(type(personID))\n tmpResults = getTMDBDetailsfromIMDBID(personID, \"person\")\n # print(\"+\")\n if tmpResults:\n\n tmpPerson = {\n \"full-size headshot\": tmpResults[\"profile_path\"],\n \"name\": tmpResults[\"name\"],\n \"id\": personID,\n }\n else:\n tmpPerson = {}\n # print(\"no-results! for person \"+personID)\n # print(\"/\")\n # print(Person.default_info)\n # print(ia.get_person_infoset())\n # print(tmpPerson.current_info)\n # pprint.pprint(tmpPerson)\n # print(type(tmpPerson))\n # for key in tmpPerson.keys():\n # \tprint(\"key is [\"+key+\"]\")\n # print(\"-\")\n if \"full-size headshot\" in tmpPerson.keys():\n tmpImageURL = ConstructTMDBImageURL(tmpPerson[\"full-size headshot\"], \"original\")\n if tmpImageURL:\n if re.match(r\"^https?://.*\\.jpg$\", tmpImageURL):\n # print(tmpPerson['name'])\n # print(tmpPerson['full-size headshot'])\n return tmpImageURL\n else:\n print(\n \"Error - found headshot for \"\n + tmpPerson[\"name\"]\n + \" but format is not URL but [\"\n + tmpPerson[\"full-size headshot\"]\n + \"]\"\n )\n return None\n # tmpPerson['ImdbID'] = tmpDirector['personid']\n\n\ndef WrapTMDBDataInIMDB(IMDBMovieID):\n if IMDBMovieID == None:\n print(\"Bug, why try fetching TMDBData if movie wasn't found?\")\n return\n MovieInformation = {}\n tmpData = getTMDBIDfromIMDBID(IMDBMovieID, \"movie\")\n movieID = None\n kind = None\n # HANDLE NO DATA BUG 91239\n if tmpData and \"id\" in tmpData:\n movieID = tmpData.get(\"id\")\n if tmpData and \"kind\" in tmpData:\n kind = tmpData.get(\"kind\")\n if not (kind == \"movie\" or kind == \"tv\"):\n print(\"Bug, getTMDBIDfromIMDBID returned wrong kind of data, perhaps no movie?\")\n # HANDLE NO DATA BUG 91239\n return\n # https://api.themoviedb.org/3/movie/109443?api_key=096187be7a5391bfa9843173e059137d\n # &language=en-US&append_to_response\n # =external_ids,keywords,credits,videos,images&include_image_language=null,en,fr,jp,cn,de,es,it\n # print(\"Fetching TMDB Data for \"+ str(IMDBMovieID))\n\n requestURL = (\n \"https://api.themoviedb.org/3/\"\n + kind\n + \"/\"\n + str(movieID)\n + \"?api_key=\"\n + myTMDB_APIkey\n + \"&language=en-US&append_to_response\"\n + 
\"=external_ids,alternative_titles,keywords,credits,videos,images&include_image_language=null,en,fr,jp,cn,de,es,it\"\n )\n response = requests.get(requestURL)\n # for tmpkey in response.headers.keys():\n # \tprint(tmpkey+\":[\"+response.headers[tmpkey]+\"]\")\n\n # \tprint(response.headers['X-RateLimit-Limit'])\n response.raise_for_status()\n\n if response.status_code == requests.codes.ok:\n current_time = time.time()\n if \"X-RateLimit-Remaining\" in response.headers and int(response.headers[\"X-RateLimit-Remaining\"]) <= 1:\n while int(response.headers[\"X-RateLimit-Reset\"]) + 1 > time.time():\n print(\"TMDB:Reached rate-limit. Waiting...\")\n time.sleep(1)\n tmpJSON = json.loads(response.text)\n # for key in tmpJSON.keys():\n # \tprint(\"- \"+ key)\n # \tprint(tmpJSON.get(key))\n MovieInformation[\"ImdbID\"] = IMDBMovieID\n MovieInformation[\"TMDBID\"] = movieID\n if \"title\" in tmpJSON:\n MovieInformation[\"Title\"] = tmpJSON.get(\"title\")\n MovieInformation[\"OtherTitles\"] = []\n if \"original_title\" in tmpJSON:\n MovieInformation[\"OriginalTitle\"] = tmpJSON.get(\"original_title\")\n MovieInformation[\"OtherTitles\"].append(\n tmpJSON.get(\"original_title\") + \" (original)\"\n )\n if \"original_name\" in tmpJSON:\n MovieInformation[\"OriginalTitle\"] = tmpJSON.get(\"original_name\")\n MovieInformation[\"OtherTitles\"].append(\n tmpJSON.get(\"original_name\") + \" (original)\"\n )\n if \"alternative_titles\" in tmpJSON:\n if \"alternative_titles\" in tmpJSON:\n tmpTag = None\n if \"titles\" in tmpJSON[\"alternative_titles\"]:\n tmpTag = \"titles\"\n if \"results\" in tmpJSON[\"alternative_titles\"]:\n tmpTag = \"results\"\n # pprint.pprint(tmpJSON.get('alternative_titles'))\n for tmpD in tmpJSON[\"alternative_titles\"][tmpTag]:\n title_type = tmpD.get(\"type\")\n title_iso = tmpD.get(\"iso_3166_1\")\n title_par = \"\"\n if title_type or title_iso:\n title_par = \" (\"\n if title_type:\n title_par += title_type\n if title_iso:\n title_par += \" \" + title_iso\n title_par += \")\"\n\n MovieInformation[\"OtherTitles\"].append(tmpD.get(\"title\") + title_par)\n\n if \"tagline\" in tmpJSON:\n MovieInformation[\"Taglines\"] = []\n MovieInformation[\"Taglines\"].append(tmpJSON.get(\"tagline\"))\n\n # Kind: movie or tv? 
needs to be extracted earlier in the process\n\n if \"release_date\" in tmpJSON:\n MovieInformation[\"ReleaseYear\"] = tmpJSON.get(\"release_date\")[:4]\n MovieInformation[\"Languages\"] = []\n if \"spoken_languages\" in tmpJSON:\n for tmpD in tmpJSON.get(\"spoken_languages\"):\n MovieInformation[\"Languages\"].append(tmpD[\"iso_639_1\"])\n if \"original_language\" in tmpJSON:\n MovieInformation[\"Languages\"].append(tmpJSON[\"original_language\"])\n MovieInformation[\"Countries\"] = []\n if \"production_countries\" in tmpJSON:\n for tmpD in tmpJSON.get(\"production_countries\"):\n MovieInformation[\"Countries\"].append(tmpD[\"iso_3166_1\"])\n if \"original_country\" in tmpJSON:\n MovieInformation[\"Countries\"].append(tmpJSON[\"original_country\"])\n # Metascore: not available for TMDB\n # MetacriticURL: not available for TMDB\n\n if \"runtime\" in tmpJSON:\n MovieInformation[\"Duration\"] = tmpJSON.get(\"runtime\")\n if \"genres\" in tmpJSON:\n MovieInformation[\"Genres\"] = []\n for tmpD in tmpJSON.get(\"genres\"):\n MovieInformation[\"Genres\"].append(tmpD[\"name\"].lower())\n if \"overview\" in tmpJSON:\n MovieInformation[\"Synopsis\"] = tmpJSON.get(\"overview\")\n\n # Plot: TMDB has only overview - no \"plot outline\", nor user \"plot\"s\n if \"created_by\" in tmpJSON:\n MovieInformation[\"CreatedBy\"] = []\n for tmpD in tmpJSON.get(\"created_by\"):\n tmp = {\"Name\": tmpD.get(\"name\"), \"TMDBID\": tmpD.get(\"id\")}\n if tmpD.get(\"profile_path\") is not None:\n tmp[\"PortraitURL\"] = tmpD.get(\"profile_path\")\n MovieInformation[\"CreatedBy\"].append(tmp)\n if \"credits\" in tmpJSON:\n if \"crew\" in tmpJSON[\"credits\"]:\n MovieInformation[\"Directors\"] = []\n MovieInformation[\"Writers\"] = []\n MovieInformation[\"Producers\"] = []\n MovieInformation[\"Cinematographers\"] = []\n MovieInformation[\"ArtDirectors\"] = []\n for tmpD in tmpJSON[\"credits\"].get(\"crew\"):\n if tmpD.get(\"job\") == \"Director\":\n tmp = {\"Name\": tmpD.get(\"name\"), \"TMDBID\": tmpD.get(\"id\")}\n if tmpD.get(\"profile_path\") is not None:\n tmp[\"PortraitURL\"] = tmpD.get(\"profile_path\")\n MovieInformation[\"Directors\"].append(tmp)\n if (\n tmpD.get(\"job\") == \"Screenplay\"\n or tmpD.get(\"job\") == \"Writer\"\n or tmpD.get(\"job\") == \"Author\"\n ):\n tmp = {\"Name\": tmpD.get(\"name\"), \"TMDBID\": tmpD.get(\"id\")}\n if tmpD.get(\"profile_path\") is not None:\n tmp[\"PortraitURL\"] = tmpD.get(\"profile_path\")\n MovieInformation[\"Writers\"].append(tmp)\n if tmpD.get(\"job\") == \"Producer\":\n tmp = {\"Name\": tmpD.get(\"name\"), \"TMDBID\": tmpD.get(\"id\")}\n if tmpD.get(\"profile_path\") is not None:\n tmp[\"PortraitURL\"] = tmpD.get(\"profile_path\")\n MovieInformation[\"Producers\"].append(tmp)\n if tmpD.get(\"job\") == \"Art Direction\":\n tmp = {\"Name\": tmpD.get(\"name\"), \"TMDBID\": tmpD.get(\"id\")}\n if tmpD.get(\"profile_path\") is not None:\n tmp[\"PortraitURL\"] = tmpD.get(\"profile_path\")\n MovieInformation[\"ArtDirectors\"].append(tmp)\n if (\n tmpD.get(\"job\") == \"Cinematography\"\n or tmpD.get(\"job\") == \"Director of Photography\"\n ):\n tmp = {\"Name\": tmpD.get(\"name\"), \"TMDBID\": tmpD.get(\"id\")}\n if tmpD.get(\"profile_path\") is not None:\n tmp[\"PortraitURL\"] = tmpD.get(\"profile_path\")\n MovieInformation[\"Cinematographers\"].append(tmp)\n if \"cast\" in tmpJSON[\"credits\"]:\n MovieInformation[\"Cast\"] = []\n for tmpD in tmpJSON[\"credits\"].get(\"cast\"):\n tmp = {\"Name\": tmpD.get(\"name\"), \"TMDBID\": tmpD.get(\"id\")}\n if 
tmpD.get(\"profile_path\") is not None:\n                    tmp[\"PortraitURL\"] = tmpD.get(\"profile_path\")\n                if \"order\" in tmpD:\n                    tmp[\"Order\"] = tmpD.get(\"order\")\n                if tmpD.get(\"character\") is not None:\n                    tmp[\"Character\"] = tmpD.get(\"character\")\n                MovieInformation[\"Cast\"].append(tmp)\n    if \"keywords\" in tmpJSON and \"keywords\" in tmpJSON[\"keywords\"]:\n        MovieInformation[\"Keywords\"] = []\n        for tmpD in tmpJSON[\"keywords\"].get(\"keywords\"):\n            tmpStr = tmpD[\"name\"].strip().replace(\" \", \"-\")\n            MovieInformation[\"Keywords\"].append(tmpStr)\n    if \"videos\" in tmpJSON and \"results\" in tmpJSON[\"videos\"]:\n        for tmpD in tmpJSON[\"videos\"].get(\"results\"):\n            if tmpD[\"iso_639_1\"] == \"en\" or not MovieInformation.get(\"TrailerID\"):\n                if (\n                    tmpD[\"site\"] == \"YouTube\"\n                    and tmpD[\"type\"] == \"Trailer\"\n                    and len(tmpD[\"key\"]) > 8\n                ):  # should be 11, but arbitrarily I asked 8 minimum :-p\n                    MovieInformation[\"TrailerID\"] = tmpD[\"key\"]\n    if \"backdrop_path\" in tmpJSON:\n        MovieInformation[\"TMDBBackdrop\"] = tmpJSON.get(\"backdrop_path\")\n    if \"poster_path\" in tmpJSON:\n        MovieInformation[\"TMDBPoster\"] = tmpJSON.get(\"poster_path\")\n    if \"budget\" in tmpJSON:\n        MovieInformation[\"TMDBBudget\"] = tmpJSON.get(\"budget\")\n    MovieInformation[\"Images\"] = []\n    if \"images\" in tmpJSON and \"backdrops\" in tmpJSON[\"images\"]:\n        for tmpD in tmpJSON[\"images\"][\"backdrops\"]:\n            MovieInformation[\"Images\"].append(tmpD.get(\"file_path\"))\n    if \"images\" in tmpJSON and \"posters\" in tmpJSON[\"images\"]:\n        for tmpD in tmpJSON[\"images\"][\"posters\"]:\n            MovieInformation[\"Images\"].append(tmpD.get(\"file_path\"))\n\n    # tmpMovieDictionary['Poster'] obsolete - I wasn't using this particular one anyways (I'd take the one from TMDB)\n    # tmpMovieDictionary['ImdbRating'] - IMDB specific\n    # tmpMovieDictionary['ImdbVotes'] - IMDB specific\n    # tmpMovieDictionary['ImdbVoteDistribution'] - IMDB specific\n    # tmpMovieDictionary['ImdbVoteDemographics'] - IMDB specific\n    # 'RottenTomatoesRating'\n    # 'RottenTomatoesVotes'\n\n    if \"popularity\" in tmpJSON:\n        MovieInformation[\"TMDBPopularity\"] = tmpJSON.get(\"popularity\")\n    if \"adult\" in tmpJSON:\n        MovieInformation[\"Adult\"] = tmpJSON.get(\"adult\")\n    if \"vote_count\" in tmpJSON:\n        MovieInformation[\"TMDBVotes\"] = tmpJSON.get(\"vote_count\")\n    if \"vote_average\" in tmpJSON:\n        MovieInformation[\"TMDBRating\"] = tmpJSON.get(\"vote_average\")\n\n    # pprint.pprint(MovieInformation)\n    return MovieInformation\n\n\ndef FetchMovieDataIMDBLite(MovieID, getPeople=False):\n    #\n    # \tfetch only basic movie data, so that you can complement the\n    # \tdata from TMDB - FetchMovieData does the opposite: fetches most\n    # \tdata from IMDB and only minimal from TMDB. 
In case you want to fetch\n    # \tpeople data (directors, actors etc) choose this option - this is for\n    # \tmovies that couldn't be found in TMDB so you cannot get the ORDERED\n    # \tcast list from there.\n    #\n    try:\n        movieData = ia.get_movie(str(MovieID))  # interstellar 0816692\n        ia.update(\n            movieData, info=[\"keywords\", \"vote details\", \"taglines\", \"critic reviews\"]\n        )\n    except imdb.IMDbError as e:\n        print(e)\n    tmpMovieDictionary = {}\n    tmpMovieDictionary.clear()\n    # for key in movieData.keys():\n    # \tprint(\"---------\"+key+\"-----------\")\n    # \tpprint.pprint(movieData[key])\n\n    # from movie 'main' information_set\n    tmpMovieDictionary[\"Title\"] = movieData.get(\"title\")\n    tmpMovieDictionary[\"OtherTitles\"] = []\n    if \"akas\" in movieData:\n        tmpMovieDictionary[\"OtherTitles\"] = copy.copy(movieData.get(\"akas\"))\n    if \"taglines\" in movieData:\n        tmpMovieDictionary[\"Taglines\"] = copy.copy(movieData.get(\"taglines\"))\n    tmpMovieDictionary[\"Kind\"] = movieData.get(\"kind\")\n    tmpMovieDictionary[\"ReleaseYear\"] = movieData.get(\"year\")\n    tmpMovieDictionary[\"Languages\"] = movieData.get(\"languages\")\n    tmpMovieDictionary[\"Countries\"] = movieData.get(\"countries\")\n    if \"metascore\" in movieData:\n        tmpMovieDictionary[\"Metascore\"] = copy.copy(movieData.get(\"metascore\"))\n    if \"metacritic url\" in movieData:\n        tmpMovieDictionary[\"MetacriticUrl\"] = copy.copy(movieData.get(\"metacritic url\"))\n    if \"runtimes\" in movieData:\n        tmpMovieDictionary[\"Duration\"] = int(\n            movieData.get(\"runtimes\")[0]\n        )  # is it list?\n    tmpMovieDictionary[\"Genres\"] = movieData.get(\"genres\").copy()\n    if \"plot outline\" in movieData:\n        tmpMovieDictionary[\"Synopsis\"] = movieData.get(\"plot outline\")\n    if movieData.get(\"plot\")[0]:\n        tmpMovieDictionary[\"Plot\"] = movieData.get(\"plot\")[0]\n    if \"keywords\" in movieData:\n        tmpMovieDictionary[\"Keywords\"] = copy.copy(movieData.get(\"keywords\"))\n    tmpMovieDictionary[\"ImdbID\"] = MovieID\n    if \"rating\" in movieData:\n        tmpMovieDictionary[\"ImdbRating\"] = movieData.get(\"rating\")\n    if \"votes\" in movieData:\n        tmpMovieDictionary[\"ImdbVotes\"] = movieData.get(\"votes\")\n    if \"number of votes\" in movieData:\n        tmpMovieDictionary[\"ImdbVoteDistribution\"] = copy.copy(\n            movieData.get(\"number of votes\")\n        )\n    if \"demographics\" in movieData:\n        tmpMovieDictionary[\"ImdbVoteDemographics\"] = copy.deepcopy(\n            movieData.get(\"demographics\")\n        )\n\n    tmpMovieDictionary[\"Directors\"] = []\n    if \"directors\" in movieData:\n        for tmpDirector in movieData.get(\"directors\"):\n            if tmpDirector:\n                tmpPerson = {\n                    \"Name\": tmpDirector[\"name\"],\n                    \"ImdbID\": tmpDirector.personID,\n                }\n                tmpMovieDictionary[\"Directors\"].append(tmpPerson)\n    tmpMovieDictionary[\"Producers\"] = []\n    if \"producers\" in movieData:\n        for tmpProducer in movieData.get(\"producers\"):\n            if tmpProducer:\n                tmpPerson = {\n                    \"Name\": tmpProducer[\"name\"],\n                    \"ImdbID\": tmpProducer.personID,\n                }\n                tmpMovieDictionary[\"Producers\"].append(tmpPerson)\n    tmpMovieDictionary[\"Writers\"] = []\n    if \"writers\" in movieData:\n        for tmpWriter in movieData.get(\"writers\"):\n            if tmpWriter:\n                tmpPerson = {\"Name\": tmpWriter[\"name\"], \"ImdbID\": tmpWriter.personID}\n                tmpMovieDictionary[\"Writers\"].append(tmpPerson)\n    tmpMovieDictionary[\"Cinematographers\"] = []\n    if \"cinematographers\" in movieData:\n        for tmpCinematographer in movieData.get(\"cinematographers\"):\n            if tmpCinematographer:\n                tmpPerson = {\n                    \"Name\": tmpCinematographer[\"name\"],\n                    \"ImdbID\": 
tmpCinematographer.personID,\n                }\n                tmpMovieDictionary[\"Cinematographers\"].append(tmpPerson)\n    if getPeople == True:\n        tmpMovieDictionary[\"Cast\"] = []\n        if \"cast\" in movieData:\n            for tmpCast in movieData.get(\"cast\"):\n                if tmpCast:\n                    tmpPerson = {\"Name\": tmpCast[\"name\"], \"ImdbID\": tmpCast.personID}\n                    tmpMovieDictionary[\"Cast\"].append(tmpPerson)\n    return tmpMovieDictionary\n\n\ndef ConstructIMDBIDfromNumber(movieID, idtype=\"movie\"):\n    # takes 190590 and returns IMDB format of: tt0190590\n    if type(movieID) == int:\n        if idtype == \"movie\":\n            # print(\"mp!\")\n            return \"tt{:07d}\".format(movieID)\n        elif idtype == \"person\":\n            # print(\"ip!\")\n            return \"nm{:07d}\".format(movieID)\n    elif type(movieID) == str:\n        if idtype == \"movie\":\n            # print(\"ms!\")\n            return \"tt\" + movieID\n        elif idtype == \"person\":\n            # print(\"is!\")\n            return \"nm\" + movieID\n    print(\n        \"Bug - ConstructIMDBIDfromNumber shouldn't ever reach here. Movie/Person ID is:\"\n    )\n    print(movieID)\n    print(type(movieID))\n    return None\n\n\ndef ConstructTMDBImageURL(ImageName, ImageWidth=\"w780\"):\n    # https://image.tmdb.org/t/p/w780/xu9zaAevzQ5nnrsXN6JcahLnG4i.jpg\n    # 780/1280/original\n    if ImageName and ImageWidth:\n        tmpURL = \"https://image.tmdb.org/t/p/\" + ImageWidth + ImageName\n        return tmpURL\n    return None\n\n\ndef getTMDBTrailerLink(TMDBmovieID, Kind=\"movie\"):\n    # https://api.themoviedb.org/3/find/tt0816692?api_key=096187be7a5391bfa9843173e059137d&language=en-US&external_source=imdb_id\n    FoundTrailer = None\n    if Kind != \"tv\":\n        Kind = \"movie\"\n    requestURL = (\n        \"https://api.themoviedb.org/3/\"\n        + Kind\n        + \"/\"\n        + str(TMDBmovieID)\n        + \"/videos?api_key=\"\n        + myTMDB_APIkey\n        + \"&language=en-US\"\n    )\n    response = requests.get(requestURL)\n    # response.raise_for_status()\n    # print(\"1\")\n    if response.status_code == requests.codes.ok:\n        tmpJSON = json.loads(response.text)\n        # print(\"2\")\n        # pprint.pprint(tmpJSON)\n        if tmpJSON[\"results\"]:\n            # print(\"3\")\n            # pprint.pprint(tmpJSON['results'])\n            # TrailerIndex = next((index for (index, d) in enumerate(lst) if d[\"type\"].lower() == \"trailer\"), None)\n            # TrailerIndex = find(tmpJSON['results'],\"type\",\"Trailer\")\n            # print(TrailerIndex)\n            # FoundTrailer = tmpJSON['results'][TrailerIndex]\n            for tmpVideoFileResult in tmpJSON[\"results\"]:\n                # print(\"4\")\n                # pprint.pprint(tmpVideoFileResult)\n                if (\n                    tmpVideoFileResult[\"site\"] == \"YouTube\"\n                    and tmpVideoFileResult[\"type\"] == \"Trailer\"\n                    and len(tmpVideoFileResult[\"key\"]) == 11\n                ):\n                    # print(\"5\")\n                    FoundTrailer = tmpVideoFileResult[\"key\"]\n                    # print(\"found: \" +FoundTrailer )\n                    return FoundTrailer\n        else:\n            return None\n    return FoundTrailer\n\n\ndef getTMDBDetailsfromIMDBID(movieID, idtype=\"movie\"):\n    # https://api.themoviedb.org/3/find/tt0816692?api_key=096187be7a5391bfa9843173e059137d&language=en-US&external_source=imdb_id\n    requestURL = (\n        \"https://api.themoviedb.org/3/find/\"\n        + ConstructIMDBIDfromNumber(movieID, idtype)\n        + \"?api_key=\"\n        + myTMDB_APIkey\n        + \"&language=en-US&external_source=imdb_id\"\n    )\n    response = requests.get(requestURL)\n    response.raise_for_status()\n    if response.status_code == requests.codes.ok:\n        tmpJSON = json.loads(response.text)\n\n        if idtype == \"movie\" or idtype == \"tv\":\n            if tmpJSON[idtype + \"_results\"]:\n                # print(idtype + '_results')\n                # if tmpJSON[0][idtype + '_results']['vote_average']:\n                tmpJSON[idtype + \"_results\"][0][\"TMDBRating\"] = tmpJSON[\n                    idtype + \"_results\"\n                ][0][\"vote_average\"]\n                # if tmpJSON[0][idtype + 
'_results']['vote_count']:\n                tmpJSON[idtype + \"_results\"][0][\"TMDBVotes\"] = tmpJSON[idtype + \"_results\"][\n                    0\n                ][\"vote_count\"]\n                # if tmpJSON[0][idtype + '_results']['vote_average']:\n                tmpJSON[idtype + \"_results\"][0][\"TMDBAdult\"] = tmpJSON[idtype + \"_results\"][\n                    0\n                ][\"adult\"]\n                tmpJSON[idtype + \"_results\"][0][\"TMDBPopularity\"] = tmpJSON[\n                    idtype + \"_results\"\n                ][0][\"popularity\"]\n                tmpJSON[idtype + \"_results\"][0][\"Kind\"] = idtype\n                # pprint.pprint(tmpJSON['movie_results'][0])\n                return tmpJSON[idtype + \"_results\"][0]\n        if idtype == \"person\":\n            if tmpJSON[\"person_results\"]:\n                tmpJSON[\"person_results\"][0][\"Kind\"] = \"person\"\n                return tmpJSON[\"person_results\"][0]\n\n\ndef getTMDBIDfromIMDBID(movieID, idtype=\"movie\"):\n    # https://api.themoviedb.org/3/find/tt0816692?api_key=096187be7a5391bfa9843173e059137d&language=en-US&external_source=imdb_id\n    requestURL = (\n        \"https://api.themoviedb.org/3/find/\"\n        + ConstructIMDBIDfromNumber(movieID, idtype)\n        + \"?api_key=\"\n        + myTMDB_APIkey\n        + \"&language=en-US&external_source=imdb_id\"\n    )\n    response = requests.get(requestURL)\n    response.raise_for_status()\n    if response.status_code == requests.codes.ok:\n        tmpJSON = json.loads(response.text)\n        if idtype == \"person\":\n            if tmpJSON[\"person_results\"]:  # and len(tmpJSON['person_results'])>0:\n                return {\"id\": tmpJSON[\"person_results\"][0][\"id\"], \"kind\": \"person\"}\n        if idtype == \"movie\":\n            if tmpJSON[\"movie_results\"]:  # and len(tmpJSON['movie_results'])>0:\n                return {\"id\": tmpJSON[\"movie_results\"][0][\"id\"], \"kind\": \"movie\"}\n            if tmpJSON[\"tv_results\"]:  # and len(tmpJSON['tv_results'])>0:\n                return {\"id\": tmpJSON[\"tv_results\"][0][\"id\"], \"kind\": \"tv\"}\n    return None\n\n\ndef is_downloadable(url):\n    \"\"\"\n    Does the url contain a downloadable resource?\n    by https://www.codementor.io/aviaryan/downloading-files-from-urls-in-python-77q3bs0un\n    \"\"\"\n    h = requests.head(url, allow_redirects=True)\n    header = h.headers\n    content_type = header.get(\"content-type\")\n    if \"text\" in content_type.lower():\n        return False\n    if \"html\" in content_type.lower():\n        return False\n    return True\n\n\ndef getImageDataFromURL(url):\n    r = requests.get(url, allow_redirects=True)\n    return r.content\n\n\ndef WriteDataToFile(Data, FileURI, Overwrite=False):\n    if FileURI:\n        if os.path.exists(FileURI) and Overwrite == False:\n            raise FileExistsError(\n                \"Bug, file written to already exists and overwrite is set to False\"\n            )\n        tmpFile = open(FileURI, \"wb\")\n        tmpFile.write(Data)\n        tmpFile.close()\n    else:\n        raise ValueError(\"Bug, attempting to write file without filename set properly\")\n\n\ndef test_movie_keywords_should_be_a_list_of_keywords(ia):\n    movie = ia.get_movie(\"0133093\", info=[\"keywords\"])  # Matrix\n    keywords = movie.get(\"keywords\", [])\n    print(\"here!\")\n    print(keywords)\n    assert 250 <= len(keywords) <= 400\n    assert {\"computer-hacker\", \"messiah\", \"artificial-reality\"}.issubset(set(keywords))\n\n\ndef BuildLaunchOptions(argv):\n    LaunchOptions = {\n        \"ListItems\": False,\n        \"DisplayedItemsStatus\": [\n            \"Ambiguous\",\n            \"Verified\",\n            \"Unverified\",\n            \"Unimported\",\n            \"Incomplete\",\n        ],\n        \"Filename\": None,\n        \"ImdbID\": None,\n        \"UpdateFile\": False,\n        \"UpdateFileWithID\": False,\n        \"UpdateFileWithStatus\": False,\n        \"NewStatus\": None,\n        \"ScanFolder\": False,\n    }\n    try:\n        opts, args = getopt.getopt(\n            argv,\n            \"?hl:sd:f:i:ct:\",\n            [\n                \"help\",\n                \"list=\",\n                \"set\",\n                \"display=\",\n                \"file=\",\n                \"imdb=\",\n                \"change\",
\n                \"status=\",\n            ],\n        )\n    except getopt.GetoptError:\n        print(\"Invalid syntax use -?, -h, --help: for help\")\n        sys.exit(2)\n    for opt, arg in opts:\n        if opt.lower() in (\"-h\", \"-?\", \"--help\"):\n            print(\"-h, --help: displays this help\")\n            print(\n                \"-l [all,ambiguous,unimported,verified,incomplete], --list [all,ambiguous,unimported,verified,incomplete]: list all,ambiguous or unimported movies with their IMDB ID\"\n            )\n            print(\n                \"-c -f <filename> -i <new imdbID> -t [Unimported/Unverified/Verified/Ambiguous/Incomplete], --change --file <filename> --imdb <new imdbID> --status [Unimported/Unverified/Verified/Ambiguous/Incomplete]: match imdbID and/or manually set status for movie with given filename\"\n            )\n            # print('-u -f <filename> -t [Unimported/Unverified/Verified/Ambiguous/Incomplete], --update --file <filename> --status [Unimported/Unverified/Verified/Ambiguous/Incomplete]: manually set the status for the file')\n            print(\n                \"-i <imdbID>, --imdb <imdbID>: displays details for movie with imdb ID\"\n            )\n            print(\"-s, --scan: scan and update new movies and cleanup orphans\")\n            # print '-i <filename>, --details <imdbID>: display details for movie with imdb ID'\n            # print '-i <movie name>, --import <movie name>: '\n            # print '-u <imdbID>, --update <imdbID>:'\n            sys.exit()\n        elif opt.lower() in (\"-l\", \"--list\"):\n            if arg.lower() in (\"\", \"all\"):\n                LaunchOptions[\"ListItems\"] = True\n            elif arg.lower() in (\"ambiguous\",):\n                LaunchOptions[\"ListItems\"] = True\n                LaunchOptions[\"DisplayedItemsStatus\"] = [\"Ambiguous\"]\n            elif arg.lower() in (\"unimported\",):\n                LaunchOptions[\"ListItems\"] = True\n                LaunchOptions[\"DisplayedItemsStatus\"] = [\"Unimported\"]\n            elif arg.lower() in (\"verified\",):\n                LaunchOptions[\"ListItems\"] = True\n                LaunchOptions[\"DisplayedItemsStatus\"] = [\"Verified\"]\n            elif arg.lower() in (\"incomplete\",):\n                LaunchOptions[\"ListItems\"] = True\n                LaunchOptions[\"DisplayedItemsStatus\"] = [\"Incomplete\"]\n            else:\n                print(\n                    \"invalid list argument --list should be all/ambiguous/unimported/verified/incomplete\"\n                )\n                sys.exit()\n        elif opt.lower() in (\"-c\", \"--change\"):\n            LaunchOptions[\"UpdateFile\"] = True\n        elif opt.lower() in (\"-f\", \"--file\"):\n            if arg:\n                LaunchOptions[\"Filename\"] = arg\n            else:\n                print(\"Invalid Syntax: missing filename\")\n                sys.exit()\n        elif opt.lower() in (\"-i\", \"--imdb\"):\n            if re.match(r\"^[0-9]{7}$\", arg):\n                LaunchOptions[\"ImdbID\"] = int(arg)\n            else:\n                print(\"Invalid Imdb ID: should be a 7 digit number\")\n                sys.exit()\n        elif opt.lower() in (\"-t\", \"--status\"):\n            if arg.lower() in (\n                \"ambiguous\",\n                \"unimported\",\n                \"unverified\",\n                \"verified\",\n                \"incomplete\",\n            ):\n                LaunchOptions[\"NewStatus\"] = arg.lower().capitalize()\n            else:\n                print(\n                    \"Invalid Status: should be Unimported, Unverified, Verified, Ambiguous or Incomplete\"\n                )\n                sys.exit()\n        elif opt.lower() in (\"-s\", \"--scan\"):\n            LaunchOptions[\"ScanFolder\"] = True\n\n    # 2nd pass: for combined args eg. 
-c, needs -f and -i\n if LaunchOptions[\"UpdateFile\"]:\n if not LaunchOptions[\"Filename\"]:\n print(\"invalid syntax for -c,--change: Missing filename\")\n print(\"correct syntax: -c -f filename.ext -i 1234567\")\n sys.exit()\n elif not (LaunchOptions[\"ImdbID\"] or LaunchOptions[\"NewStatus\"]):\n print(\n \"invalid syntax for -c,--change: Missing new imdbid or/and new Status \"\n )\n print(\"correct syntax: -c -f filename.ext -i 1234567 -t Ambiguous\")\n sys.exit()\n else:\n if not os.path.isabs(LaunchOptions[\"Filename\"]):\n tmpFileName = os.path.join(SourcePath, LaunchOptions[\"Filename\"])\n LaunchOptions[\"Filename\"] = os.path.basename(LaunchOptions[\"Filename\"])\n if not os.path.exists(tmpFileName):\n print(\"File Not Found\")\n sys.exit()\n return LaunchOptions\n\n\nSourceRelativePath = None\nSourcePath = None\nCacheRelativePath = \"cache\"\nCachePath = os.path.join(os.getcwd(), CacheRelativePath)\nTemplatesRelativePath = \"templates\"\nMyMoviesCatalog = [] # list of Imported Movies\nSleepInMS = 2000 # 2sec interval between calls to google (to prevent detection of bot)\nconstHowManyPortraitsShouldFetch = 6 # fetch six portraits -\nconstHowManyActorsShouldDisplay = (\n 6\n) # pass this const/var to javascript - how many actors should I display for each movie?\nshouldCrossReferenceOnDuckDuckGo = (\n False\n) # was working fine set to True, until duckduckgo stopped working on my computer! Perhaps I was throttles, perhaps it's country-wide issue.\nCurrentMovieDataFileVersion = (\n 19032501\n) # format: YY+MM+DD+INC:0,1,2.. changing this, causes the moviedata files to be modified/touched/updated\nDefaultThumbnailWidth = 256 # 256px\n# ValidFileTypes = [\".mkv\",\".avi\",\".mp4\",\".mov\",\".mpeg\",\".txt\",\".torrent\"] #list of valid filetypes to treat as movie names and fetch their data\n# includeFoldersInValidFileTypes = True #additionally allow folders to be treated as movie names and fetch their data\nfrom DefaultSettings import *\n\ntry:\n from UserSettings import * # if UserSettings.py exists, it overwrites Defaults settings\nexcept ImportError:\n pass\nif SourceRelativePath and not SourcePath:\n SourcePath = os.path.join(os.getcwd(), SourceRelativePath)\nif not myTMDB_APIkey:\n print(\n \"Error: You haven't set an TMDB_APIkey. You need to create an account on TMDB, request an API key and set the generated key into the DefaultSettings.py - sample value: myTMDB_APIkey='036119ba3e5391bfb9941113a089417e'\"\n )\n exit()\nif not SourcePath and not SourceRelativePath:\n print(\n \"Error: You haven't set a folder where the movies are. 
Open DefaultSettings.py and set SourcePath (absolute) or SourceRelativePath (relative) into the desired one\"\n )\n exit()\n\nia = imdb.IMDb() # imdb object\n\n\ndef main(argv):\n # TestRun()\n\n LaunchOptions = BuildLaunchOptions(argv)\n MakeDirectory(CachePath, \"\", \"\")\n if os.path.isdir(SourcePath) and os.path.isdir(CachePath):\n if LaunchOptions[\"ListItems\"]:\n print(\"\\n\")\n print(\n \" \\t{:^30.30}\\t{:<4.4}\\t{:<7.7}\\t{:<13.13}\\t{:<60.60}\".format(\n \"Movie Name\", \"Year\", \"IMDB ID\", \"Import Status\", \"Filename\"\n )\n )\n print(\"{:_<132}\".format(\"\"))\n myreps = 0\n # SourceFilesList=set(os.listdir(SourcePath))\n SourceFilesList = {}\n CacheFilesList = {}\n if includeFoldersInValidFileTypes:\n SourceFilesList = {\n f + os.sep\n for f in os.listdir(SourcePath)\n if os.path.isdir(os.path.join(SourcePath, f))\n }\n SourceFilesList = SourceFilesList.union(\n {\n f\n for f in os.listdir(SourcePath)\n if os.path.isfile(os.path.join(SourcePath, f))\n }\n )\n # CacheFilesList=os.listdir(CachePath)\n if includeFoldersInValidFileTypes:\n CacheFilesList = {\n f + os.sep\n for f in os.listdir(CachePath)\n if os.path.isdir(os.path.join(CachePath, f, os.sep))\n }\n CacheFilesList = CacheFilesList.union(\n {\n f\n for f in os.listdir(CachePath)\n if os.path.isfile(os.path.join(CachePath, f))\n }\n )\n for File in CacheFilesList:\n FileURI = os.path.join(CachePath, File)\n if File.endswith(\".ffiles/\"):\n StrippedMovieName = StripExtension(File)\n if LaunchOptions[\"ScanFolder\"] and isFolderOrphan(\n SourceFilesList, CachePath, StrippedMovieName\n ):\n print(\"deleting: \" + File)\n DeleteFolder(CachePath, StrippedMovieName)\n for File in SourceFilesList:\n FileURI = os.path.join(SourcePath, File)\n # 13/4/19: HERE!!!!\n if isValidMovieFile(SourcePath, File):\n StrippedMovieName = StripExtension(File)\n # print(\".\", end =\"\")\n ShouldUpdateFile = False\n # print(str(LaunchOptions[\"UpdateFile\"]) + \"\\t\"+ LaunchOptions[\"Filename\"] +\"\\t\" + File)\n if (\n LaunchOptions[\"UpdateFile\"]\n and LaunchOptions[\"Filename\"] == File\n and LaunchOptions[\"Filename\"]\n ):\n ShouldUpdateFile = (\n True\n ) # apparently, this is one file that needs updating! 
Bypassing main loop\n print(\".\")\n if (ShouldUpdateFile and LaunchOptions[\"ImdbID\"]) or (\n LaunchOptions[\"ScanFolder\"]\n and isMissingDataFile(CachePath, StrippedMovieName)\n ):\n myreps += 1\n if myreps % 25 == 0:\n print(\"sleeping:\")\n time.sleep(randint(10, 100))\n print(\"woken.\")\n # perform prefetching loop for new movies\n FileClean = PreCleanUpFileName(File)\n info = PTN.parse(FileClean)\n MovieName = PostCleanUpFileName(info[\"title\"])\n MakeDirectory(CachePath, StrippedMovieName)\n if \"year\" not in info:\n info[\"year\"] = \"\"\n # print(\"Searching for \" + MovieName +\":\" )\n old_time = int(round(time.time() * 1000))\n MovieID1 = None\n MovieID2 = None\n MovieID = None\n if (\n LaunchOptions[\"UpdateFile\"]\n and LaunchOptions[\"ImdbID\"]\n and ShouldUpdateFile\n ):\n MovieID1 = LaunchOptions[\"ImdbID\"]\n print(\"fetching data for movie with ID \" + str(MovieID1))\n MovieID2 = MovieID1\n MovieName = \"New\"\n else:\n MovieID1 = GetMovieIDfromWeb(\n MovieName, info[\"year\"], Provider=\"google\"\n )\n if shouldCrossReferenceOnDuckDuckGo:\n # check on duckduckgo too!\n MovieID2 = GetMovieIDfromWeb(\n MovieName, info[\"year\"], Provider=\"duckduckgo\"\n )\n else:\n MovieID2 = MovieID1 # bypass duckduckgo\n MovieID = None\n tmpMovieData = {}\n # print(\"Check: %s is %d on google and %d on tmdb\" %(MovieName,MovieID1,MovieID2))\n if MovieID1 == MovieID2 and MovieID1 == -1:\n print(MovieName + \" not found\")\n tmpMovieData[\"Status\"] = \"Unimported\"\n tmpMovieData[\"Kind\"] = \"Unknown\"\n else:\n if MovieID1 == -1:\n MovieID = (\n MovieID2\n ) # if duckduckgo had an answer and google didn't, pick duckduckgo (though it's 99% wrong)\n else:\n MovieID = (\n MovieID1\n ) # google does better, anyways, so pick google.\n if MovieID1 != MovieID2:\n print(\n \"Ambiguity: %s is %d on google and %d on duckduckgo\"\n % (MovieName, MovieID1, MovieID2)\n )\n tmpMovieData[\"Status\"] = \"Ambiguous\"\n else:\n print(\"%s ID is %d\" % (MovieName, MovieID))\n if LaunchOptions[\"UpdateFile\"] and ShouldUpdateFile:\n tmpMovieData[\n \"Status\"\n ] = \"Verified\" # hey, it's set by the user!\n else:\n tmpMovieData[\n \"Status\"\n ] = \"Unverified\" # automatic match, might be wrong\n if MovieID > 0:\n # for x in range(40):\n # \tWrapTMDBDataInIMDB(MovieID)\n tmpData = WrapTMDBDataInIMDB(MovieID)\n if tmpData:\n tmpMovieData.update(tmpData)\n else:\n print(\n \"Error, couldn't find movie with ImdbID %d on TMDB\"\n % (MovieID)\n )\n tmpMovieData[\n \"Status\"\n ] = (\n \"Incomplete\"\n ) # assumed TMDB found nothing, deem data incomplete\n else:\n print(\n \"Bug, couldn't find movie because I chose movieID %d whereas it should never reach here\"\n % (MovieID)\n )\n print(\".\", end=\"\", flush=True)\n # pprint.pprint(tmpMovieData)\n if \"TMDBID\" in tmpMovieData:\n tmp = FetchMovieDataIMDBLite(MovieID, True)\n tmpMovieData = selectiveMerge(tmpMovieData, tmp)\n print(\".\", end=\"\", flush=True)\n # pprint.pprint(tmp)\n else:\n tmp = FetchMovieDataIMDBLite(MovieID, True)\n tmpMovieData = update(tmpMovieData, tmp)\n print(\".\", end=\"\", flush=True)\n # pprint.pprint(tmp)\n print(\".\", end=\"\", flush=True)\n if \"TMDBPoster\" in tmpMovieData:\n PosterURL = ConstructTMDBImageURL(\n tmpMovieData[\"TMDBPoster\"], ImageWidth=\"original\"\n )\n if PosterURL:\n if (\n isMissingPosterFile(CachePath, StrippedMovieName)\n or ShouldUpdateFile\n ):\n PosterData = getImageDataFromURL(PosterURL)\n PosterFileName = os.path.join(\n CachePath,\n StrippedMovieName + \".ffiles\",\n \"poster.jpg\",\n )\n 
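# Cache the full-size poster beside the movie's cached metadata; per its definition above,\n                                # WriteDataToFile raises FileExistsError unless ShouldUpdateFile permits the overwrite.\n                                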
WriteDataToFile(\n                                    PosterData, PosterFileName, ShouldUpdateFile\n                                )\n                            else:\n                                print(\n                                    \"Bug, poster file already exists in folder - why? should have already been purged\"\n                                )\n                    if \"TMDBBackdrop\" in tmpMovieData:\n                        BackdropURL = ConstructTMDBImageURL(\n                            tmpMovieData[\"TMDBBackdrop\"], ImageWidth=\"original\"\n                        )\n                        if BackdropURL:\n                            if (\n                                isMissingBackdropFile(CachePath, StrippedMovieName)\n                                or ShouldUpdateFile\n                            ):\n                                BackdropData = getImageDataFromURL(BackdropURL)\n                                BackdropFileName = os.path.join(\n                                    CachePath,\n                                    StrippedMovieName + \".ffiles\",\n                                    \"backdrop.jpg\",\n                                )\n                                WriteDataToFile(\n                                    BackdropData, BackdropFileName, ShouldUpdateFile\n                                )\n                            else:\n                                print(\n                                    \"Bug, backdrop file already exists in folder - why? should have already been purged\"\n                                )\n\n                    # DATA POST FIXES -- for every time you import something:\n                    # 1. remove author name from plot\n                    tmpMovieData[\"Plot\"] = RemoveAuthorFromPlot(\n                        tmpMovieData[\"Plot\"]\n                    )\n                    tmpMovieData[\"Languages\"] = ConvertLanguageCodeToName(\n                        tmpMovieData[\"Languages\"]\n                    )\n                    tmpMovieData[\"Countries\"] = ConvertCountryCodeToName(\n                        tmpMovieData[\"Countries\"]\n                    )\n                    tmpMovieData[\"DateAdded\"] = time.strftime(\n                        \"%Y-%m-%d\", time.gmtime(os.path.getmtime(FileURI))\n                    )\n                    # ^--- date that the movie file was added\n                    tmpMovieData[\"DateImported\"] = datetime.date.today().strftime(\n                        \"%Y-%m-%d\"\n                    )\n                    # ^--- date that the file was imported into the database (i.e. today)\n                    tmpMovieData[\"Filename\"] = StrippedMovieName\n                    tmpMovieData[\"DataInternalVersion\"] = CurrentMovieDataFileVersion\n                    CreateMovieDataFile(\n                        CachePath, StrippedMovieName, tmpMovieData, ShouldUpdateFile\n                    )\n                    if doesFileExist(\n                        CachePath, tmpMovieData[\"Filename\"], \"poster.jpg\"\n                    ) and (\n                        ShouldUpdateFile\n                        or not doesFileExist(\n                            CachePath, tmpMovieData[\"Filename\"], \"poster_thumbnail.jpg\"\n                        )\n                    ):\n                        GenerateGalleryThumbnail(\n                            CachePath,\n                            tmpMovieData[\"Filename\"],\n                            \"poster.jpg\",\n                            \"poster_thumbnail.jpg\",\n                            DefaultThumbnailWidth,\n                        )\n                    new_time = int(round(time.time() * 1000))\n                    diff_time = new_time - old_time\n                    remaining_sleep = SleepInMS - diff_time\n                    # print('old:%d new:%d, diff:%d' % (old_time, new_time, remaining_sleep))\n                    if remaining_sleep > 0:\n                        time.sleep(remaining_sleep / 1000.0)\n                ignore_not_found = not LaunchOptions[\n                    \"ScanFolder\"\n                ] # ignore not found files and suppress warnings, if you were not asked to scan (and update files)\n\n                MovieData2 = RestoreMovieDataFile(\n                    CachePath, StrippedMovieName, ignore_not_found\n                )\n\n                # print(\"LO\")\n                # pprint.pprint(LaunchOptions)\n                # print(StrippedMovieName)\n                # print(\"MD2\")\n                # pprint.pprint(MovieData2)\n                if MovieData2:\n                    if (\n                        LaunchOptions[\"ListItems\"]\n                        and MovieData2[\"Status\"]\n                        in LaunchOptions[\"DisplayedItemsStatus\"]\n                    ):\n                        if MovieData2[\"Status\"] == \"Verified\":\n                            print(\n                                \"[+]\\t{:<30.30}\\t{:4d}\\t{:7d}\\t{:>13.13}\\t{:<10.60}\".format(\n                                    MovieData2.get(\"Title\"),\n                                    MovieData2.get(\"ReleaseYear\"),\n                                    MovieData2.get(\"ImdbID\"),\n                                    MovieData2.get(\"Status\"),\n                                    MovieData2.get(\"Filename\"),\n                                )\n                            )\n                        if MovieData2[\"Status\"] == \"Unimported\":\n                            print(\n                                \"[-]\\t{:^30.30}\\t{:^4.4}\\t{:^7.4}\\t{:>13.13}\\t{:<10.60}\".format(\n                                    \"-\",\n                                    \"-\",\n                                    \"-\",\n                                    MovieData2.get(\"Status\"),\n                                    MovieData2.get(\"Filename\"),\n                                )\n                            )\n                        if MovieData2[\"Status\"] == \"Unverified\":\n                            print(\n                                \" \\t{:<30.30}\\t{:4d}\\t{:7d}\\t{:>13.13}\\t{:<10.60}\".format(\n                                    MovieData2.get(\"Title\"),\n                                    MovieData2.get(\"ReleaseYear\"),\n                                    MovieData2.get(\"ImdbID\"),\n                                    MovieData2.get(\"Status\"),\n                                    MovieData2.get(\"Filename\"),\n                                )\n                            )\n                        
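# \"(?)\" marks entries whose google and duckduckgo lookups disagreed on the IMDB id (see the Ambiguous status set above).\n                        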
if MovieData2[\"Status\"] == \"Ambiguous\":\n print(\n \"(?)\\t{:<30.30}\\t{:4d}\\t{:7d}\\t{:>13.13}\\t{:<10.60}\".format(\n MovieData2.get(\"Title\"),\n MovieData2.get(\"ReleaseYear\"),\n MovieData2.get(\"ImdbID\"),\n MovieData2.get(\"Status\"),\n MovieData2.get(\"Filename\"),\n )\n )\n if MovieData2[\"Status\"] == \"Incomplete\":\n print(\n \"[?]\\t{:<30.30}\\t{:4d}\\t{:7d}\\t{:>13.13}\\t{:<10.60}\".format(\n MovieData2.get(\"Title\"),\n MovieData2.get(\"ReleaseYear\"),\n MovieData2.get(\"ImdbID\"),\n MovieData2.get(\"Status\"),\n MovieData2.get(\"Filename\"),\n )\n )\n # else:\n # (do not display pending files)\n # \t\tprint(\"***\\t{:^30.30}\\t{:^4.4}\\t{:^7.4}\\t{:>13.13}\\t{:<10.60}\".format(\"-\",\"-\",\"-\",\"Pending\",StrippedMovieName))\n if ShouldUpdateFile and MovieData2:\n if (\n LaunchOptions[\"UpdateFile\"]\n and LaunchOptions[\"NewStatus\"]\n and ShouldUpdateFile\n ):\n MovieData2[\"Status\"] = LaunchOptions[\"NewStatus\"]\n UpdateMovieDataFile(\n CachePath, MovieData2[\"Filename\"], MovieData2\n )\n print(\"updating...\")\n elif ShouldUpdateFile:\n print(\n \"File not imported yet, please import first (with -s,--scan option)\"\n )\n # F12BUGGGGGGG\n\n if MovieData2:\n MyMoviesCatalog.append(copy.deepcopy(MovieData2))\n # pprint.pprint(MyMoviesCatalog[-1])\n print(\"\\n============ Building index ===========\")\n for Movie in MyMoviesCatalog:\n # FIX1: for early imported json data, where the file with the full extension was saved, convert them in-memory\n\n FileIsDirty = False\n\n if \"DataInternalVersion\" not in Movie:\n Movie[\"DataInternalVersion\"] = 0\n FileIsDirty = True\n\n if Movie.get(\"DataInternalVersion\") < CurrentMovieDataFileVersion:\n print(\n \"Old file version. Setting update dirty bit for \"\n + Movie[\"Filename\"]\n )\n Movie[\"DataInternalVersion\"] = CurrentMovieDataFileVersion\n FileIsDirty = True\n\n # FIX1: strip .avi from filename [done, did once]\n # if Movie['Filename'][-8:] == '.avi':\n # \tprint(\"Updating Filename for \"+Movie['Filename'])\n # \tMovie['Filename']=StripExtension(Movie['Filename'])\n # \tFileIsDirty = True\n\n # FIX2: strip ::AuthorName from plot tags [done, did once]\n # if \"Plot\" in Movie:\n # \tMovie['Plot'] = RemoveAuthorFromPlot(Movie['Plot'])\n # \tprint(\"Stripping Author from Plot for \"+Movie['Filename'])\n # \tFileIsDirty = True\n\n # FIX3: find language that are encloded as 2 words\n \"\"\"\n\t\t\tif Movie.get('Languages'):\n\t\t\t\tfor index in range(len(Movie['Languages'])):\n\t\t\t\t\tLang=Movie['Languages'][index]\n\t\t\t\t\tif len(Lang) == 2:\n\t\t\t\t\t\tMovie['Languages'][index] = LanguageNameByCode(Lang)\n\t\t\t\t\t\tFileIsDirty = True\n\t\t\tif Movie.get('Countries'):\n\t\t\t\tfor index in range(len(Movie['Countries'])):\n\t\t\t\t\tCountry = Movie['Countries'][index]\n\t\t\t\t\tif len(Country) == 2:\n\t\t\t\t\t\tMovie['Countries'][index] = CountryNameByCode(Country)\n\t\t\t\t\t\tFileIsDirty = True\n\t\t\t\"\"\"\n # FIX4: update all files so that the 1st actor in the list, gets order=0 (Should need to run once only)\n \"\"\"\n\t\t\tif \"Kind\" in Movie and Movie[\"Kind\"] not in [\"Unknown\"]:\n\t\t\t\tif \"Cast\" in Movie:\n\t\t\t\t\tif len(Movie[\"Cast\"])> 0:\n\t\t\t\t\t\tif not\"Order\" in Movie[\"Cast\"][0]:\n\t\t\t\t\t\t\tMovie[\"Cast\"][0][\"Order\"] = 0\n\t\t\t\t\t\t\tFileIsDirty = True\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tprint(\"error, Actor\" + Movie[\"Cast\"][0][\"Name\"]+\" already has order set in \" + Movie['Title'])\n\t\t\t\t\telse:\n\t\t\t\t\t\tprint(\"error, empty Cast list in \" + 
Movie['Title'])\n\t\t\t\telse:\n\t\t\t\t\tprint(\"error, no Cast in \" + Movie['Title'])\n\t\t\t\"\"\"\n\n if doesFileExist(\n CachePath, Movie[\"Filename\"], \"poster.jpg\"\n ) and not doesFileExist(\n CachePath, Movie[\"Filename\"], \"poster_thumbnail.jpg\"\n ):\n GenerateGalleryThumbnail(\n CachePath,\n Movie[\"Filename\"],\n \"poster.jpg\",\n \"poster_thumbnail.jpg\",\n DefaultThumbnailWidth,\n )\n # print(\"Generating Thumbnail for movie \"+Movie['Filename'])\n\n if FileIsDirty == True:\n UpdateMovieDataFile(CachePath, Movie[\"Filename\"], Movie)\n print(\"Writing Changes for movie file \" + Movie[\"Filename\"])\n\n # if 'ImdbID' in Movie:\n # if Movie['ImdbID']==54698 or Movie['ImdbID']==6628102 or Movie['ImdbID']==499262 or Movie['Status'] == \"Unimported\" or Movie['ImdbID']==2675914:\n # \ttmpString=Movie['Filename']\n\n BuildIndex(MyMoviesCatalog)\n print(\"\\n================= Done ================\")\n PickleMyCatalog = False\n if PickleMyCatalog:\n file_Name = os.path.join(CachePath, \"fulldump.tmp\")\n fileObject = open(file_Name, \"wb\")\n pickle.dump(MyMoviesCatalog, fileObject)\n fileObject.close()\n\n JSONMyCatalog = False\n if JSONMyCatalog:\n file_Name = os.path.join(CachePath, \"fulldump.json\")\n with open(file_Name, \"w\") as JSONFile:\n json.dump(MyMoviesCatalog, JSONFile)\n else:\n print(\"Path \" + CachePath + \" does not exist\")\n\n\nif __name__ == \"__main__\":\n main(sys.argv[1:])\n" }, { "alpha_fraction": 0.7569620013237, "alphanum_fraction": 0.7639240622520447, "avg_line_length": 65.76056671142578, "blob_id": "98566bfb0c353045000e5ec9bea8a0f50cefa144", "content_id": "6717ffc45d2aa6165f02bf73e7a147a82578ff31", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 4740, "license_type": "permissive", "max_line_length": 487, "num_lines": 71, "path": "/readme.md", "repo_name": "SecretSanta007/MovieCatalog", "src_encoding": "UTF-8", "text": "MovieCatalog\n========\n\n## What is MovieCatalog?\n**Movie Catalog** is a python script that fetches movie data from various sources (imdb,tmdb,youtube,google) and generates a user-friendly catalog for all the movies the user has inside a folder. I created it to best manage my watchlist and quickly scan for movie information without jumping from site to site. It can work however on things such as movie files, screenshots, subtitle collections, or torrent files, or simple text files as long as the filenames are indicative of a movie.\n\n![image1](readme.resources/scroll09.gif)\n\n![movie page](readme.resources/moviepage03.gif)\n\nThe resulting catalog can be found inside the `index.html` file. \n\n (!) MovieCatalog is NOT a movie downloader.\n\n## How does MovieCatalog find which movies to download data for?\n**MovieCatalog** is filesystem-based meaning that it scans a specific folder (*Source*) for files and folders (eg. .srt files, video files, .torrent files or .txt files) and fetches the information for the movies extracted from the filenames. In other words, it treats the folder's content, as a list of movie names. The downloaded data are kept in a `cache` folder.\n\n![filelist](readme.resources/filelist01.jpg)\n\n## Features\n### Runs on demand\n**MovieCatalog** does not run in the background but can be automated to run when a file is added or removed from a folder (eg via smart folders/Automator) - The .py script updates the catalog by scanning changes in the *Source* folder, fetching the data and rebuilding the monolithic `index.html` file and then quits. 
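\nAs an illustration only (the schedule and install path are placeholders, not part of the project), a cron entry such as `*/30 * * * * cd /path/to/MovieCatalog && python3 main.py -s` gives the same effect as a folder watcher: the catalog is rescanned and `index.html` rebuilt every half hour.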
\n### Is filesystem-based\nYou need only manage your movie files (Source). The script makes sure that there is parity between the source movie files/folders and the cached data. If a movie exists and its cache doesn't, it fetches the data. If a movie no longer exists, it deletes the orphaned cache silently.\n### Contains various filters for data sorting/filtering\nSupports various sorting options, as well as filtering by media type (movie,tv series) or combined genres (by pressing `Cmd`/`Ctrl`)\n\n![filtering](readme.resources/filter04.gif)\n\n**MovieCatalog** allows full-text searching inside keywords, synopsis, cast and crew data, alternative titles, ratings and things such as the release year etc.\n\n![searching](readme.resources/search03.gif)\n\n### Other features\nThe MovieCatalog `index.html` page is responsive, allows switching between light/dark mode easily and links to trailers in order to provide a one-stop experience to the user.\nIt displays the Director of Photography/Cinematographer since I find it quite important. There are links for the movies/shows and the cast/crew members to their respective IMDB and TMDB pages.\n\n## Installation\n1. Copy the files `main.py`,`DefaultSettings.py`,`templates/` and `resources/` into a folder\n2. Install Python3, get the dependencies (requirements.txt) and build the environment.\n3. In order to use the script, you need a free account at http://www.themoviedb.org\n - request a free API key at https://developers.themoviedb.org/3/getting-started/introduction\n - open the file `DefaultSettings.py` and paste the API key inside the quotes of `myTMDB_APIkey='__________________'`\n4. Find the absolute path of the folder where the movies are,\n - open the file `DefaultSettings.py` and paste the absolute path inside the quotes of `SourcePath=''` eg. `SourcePath='/Users/User1/MyFiles/Movies/'`\n5. Save the file.\n\n## Usage\n### Basic Usage\n- run `main.py -s` to **scan** for new/deleted movie files and then rebuild the html file. When importing many files, there may be some random waiting intervals to prevent blacklisting on google.com\n- open `index.html` in your browser\n### Changing mismatches\nIf the movie was matched to the wrong IMDB id, or not matched at all, or incomplete data was fetched:\n- run `main.py -l unimported` (unimported or all,ambiguous,verified,incomplete) to **display** the files that were affected.\n- run `main.py -c -f <filename> -i <new imdbID>` with the filename and the correct IMDB ID(eg.`main.py -c -f 'Vertigo.mp4' -i 0052357`)\n### Other usage\n- run `main.py` to **rebuild** the `index.html` without scanning for new/deleted files\n- run `main.py -?` displays command-line parameters\n\n### notes using with pipenv/pip3\npipenv run python3 main.py -s\n\n## Limitations/Compatibility\nIt has been tested on Opera, Firefox and Safari and Python3.\nTested with 700 movies. 
Since there is no actual database on the backend, more files mean slower performance overall\n\n## future work\n- [ ] get rid of the command-line interface and allow movie-meta editing via the app (django/flask)\n- [ ] store data in an SQL database\n- [ ] allow import from imdb watchlist\n- [ ] allow scanning multiple locations\n" }, { "alpha_fraction": 0.4712041914463043, "alphanum_fraction": 0.6963350772857666, "avg_line_length": 15.608695983886719, "blob_id": "a6cd40ac23b7dbacbdbae1781a346f8bdddc18ce", "content_id": "901ee8e88bee5a96c3d358f94281a8342038a906", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 382, "license_type": "permissive", "max_line_length": 25, "num_lines": 23, "path": "/requirements.txt", "repo_name": "SecretSanta007/MovieCatalog", "src_encoding": "UTF-8", "text": "beautifulsoup4==4.7.1\nbs4==0.0.1\ncertifi==2018.11.29\nchardet==3.0.4\ndocutils==0.14\ngoogle==2.0.1\nidna==2.8\nIMDbPY==6.7\nJinja2==2.10.1\nKivy==1.10.1\nKivy-Garden==0.1.4\nlxml==4.3.0\nMarkupSafe==1.1.0\nparse-torrent-name==1.1.1\nPillow==5.4.1\npipenv==2018.11.26\nPygments==2.3.1\nrequests==2.21.0\nsoupsieve==1.7.3\nSQLAlchemy==1.3.0\nurllib3==1.24.2\nvirtualenv==16.3.0\nvirtualenv-clone==0.5.1\n" }, { "alpha_fraction": 0.6824584007263184, "alphanum_fraction": 0.6862996220588684, "avg_line_length": 30.239999771118164, "blob_id": "feebdbd69258c003f7385d4ca76c9c65285b9de5", "content_id": "e1a3f158d767e55daf3cfa0970f3b2cbe18b3a50", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 781, "license_type": "permissive", "max_line_length": 82, "num_lines": 25, "path": "/DefaultSettings.py", "repo_name": "SecretSanta007/MovieCatalog", "src_encoding": "UTF-8", "text": "# list of valid filetypes to treat as movie names in order to fetch their data\nValidFileTypes = [\n \".mkv\",\n \".avi\",\n \".mp4\",\n \".mov\",\n \".mpeg\",\n \".txt\",\n \".torrent\",\n \".jpg\",\n \".png\",\n]\n\n# allow folders to be treated as movie names and fetch their data? True/False\nincludeFoldersInValidFileTypes = True\n\n# personal themoviedb.org API key request one at:\n# https://developers.themoviedb.org/3/getting-started/introduction\n# after creating a free account\nmyTMDB_APIkey = \"\" # <------ type keyinside quotes\n\n# absolute path with the Source (movie) files:\nSourcePath = \"\" # eg. SourcePath='/Users/user1/Video/MyMovies'\n# alternatively use relative path to the script by uncommenting the following line\n# SourceRelativePath='' # <------- type relative path\n" } ]
4
excludedBittern8/ahd_cross_seed
https://github.com/excludedBittern8/ahd_cross_seed
1b008194236cb0462b10d94c6c4f109b07bacc1c
75e325844dee0444dd8227c99c85667994932ccc
99702f09da98d814e6bbbaab5ad07af845a66a3a
refs/heads/master
2023-03-21T13:48:03.554799
2021-01-29T07:56:59
2021-01-29T07:56:59
274,007,577
0
0
null
2020-06-22T00:46:31
2021-01-29T07:57:05
2021-03-19T20:11:45
Python
[ { "alpha_fraction": 0.6035349369049072, "alphanum_fraction": 0.605349063873291, "avg_line_length": 37.20396041870117, "blob_id": "8a10a597922212f07d1280ddd81cc23b85c07837", "content_id": "e14ce9c5f96de367d83db735a71f878eac9082c1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 19293, "license_type": "no_license", "max_line_length": 358, "num_lines": 505, "path": "/general.py", "repo_name": "excludedBittern8/ahd_cross_seed", "src_encoding": "UTF-8", "text": "from datetime import date,timedelta, datetime\nfrom classes import *\nimport requests\nimport xmltodict\nimport subprocess\nfrom prompt_toolkit.shortcuts import message_dialog\nfrom prompt_toolkit.shortcuts import input_dialog\nfrom prompt_toolkit.shortcuts import radiolist_dialog\nfrom prompt_toolkit.shortcuts import button_dialog\nfrom prompt_toolkit.shortcuts import checkboxlist_dialog\nimport os\nimport re\nimport logging\nimport time\nahdlogger = logging.getLogger('AHD')\n\"\"\"\nGeneral Functions\n\"\"\"\ndef get_matches(arguments,files):\n wget=arguments['--wget']\n torrentfolder=arguments['--torrent']\n api=arguments['--api']\n cookie=arguments['--cookie']\n datefilter=(date.today()- timedelta(int(arguments['--date'])))\n currentdate=datetime.now().strftime(\"%m.%d.%Y_%H%M\")\n\n file=files.get_first()\n if file==\"No Files\":\n return\n filesize=files.get_size()\n fileguessit=guessitinfo(file)\n fileguessit.set_values()\n title=fileguessit.get_name().lower()\n if fileguessit.get_season()!=\"\":\n title=title+\": \" + fileguessit.get_season()\n imdb=get_imdb(fileguessit.get_info())\n if imdb==None:\n\n ahdlogger.warn(file,\" could not find IMDB\")\n return\n search = \"https://awesome-hd.club/searchapi.php?action=imdbsearch&passkey=\" + api + \"&imdb=tt\" + imdb\n ahdlogger.warn(f\"Searching For {files.type} with: {search}:{currentdate}\")\n try:\n response = requests.get(search, timeout=300)\n except:\n ahdlogger.warn(f\"Issue getting response: {search}:{currentdate}\")\n return\n try:\n results=xmltodict.parse(response.content)\n except:\n ahdlogger.warn(\"unable to parse xml\")\n return\n try:\n results['searchresults']['torrent'][1]['name']\n loop=True\n max=len(results['searchresults']['torrent'])\n except KeyError as key:\n if str(key)==\"1\":\n element=results['searchresults']['torrent']\n max=1\n loop=False\n else:\n ahdlogger.warn(\"Probably no results\")\n return\n for i in range(max):\n titlematch=False\n filedate=False\n group=False\n resolution=False\n source=False\n sizematch=False\n if loop: element = results['searchresults']['torrent'][i]\n querytitle=lower(element['name'])\n if querytitle==None:\n continue\n querygroup=lower(element['releasegroup'])\n if querygroup==None:\n querygroup=\"\"\n queryresolution=element['resolution']\n querysource=lower(element['media'])\n if querysource==\"uhd blu-ray\":\n querysource=\"blu-ray\"\n if querysource==\"web-dl\" or querysource==\"webrip\":\n querysource=\"web\"\n queryencoding=element['encoding']\n querysize= int(element['size'])\n querydate=datetime.strptime(element['time'], '%Y-%m-%d %H:%M:%S').date()\n if querytitle==title:\n titlematch=True\n if querysource==fileguessit.get_source() or fileguessit.get_source()==\"\":\n source=True\n if querygroup==fileguessit.get_group() or re.search(querygroup,fileguessit.get_group(),re.IGNORECASE)!=None \\\n or re.search(fileguessit.get_group(),querygroup,re.IGNORECASE)!=None or fileguessit.get_group()==\"\":\n group=True\n if 
queryresolution==fileguessit.get_resolution():\n resolution=True\n if datefilter < querydate:\n filedate=True\n if difference(querysize,filesize)<.01:\n sizematch=True\n if (titlematch is True and source is True and group is True and resolution is True \\\n and filedate is True) and (sizematch is True or filesize==0):\n pass\n else:\n continue\n ahdlogger.debug(f\"Comparison UserTitle:{title} SiteTite:{querytitle} UserSource{fileguessit.get_source()} SiteSource:{querysource} UserGroup:{fileguessit.get_group()} SiteGroup:{querygroup} UserRes:{fileguessit.get_resolution()} SiteRes:{queryresolution} Date:{filedate} \\n \")\n\n if arguments['--output']!=None and arguments['--output']!=\"\" and arguments['--output']!=\"None\":\n link=\"https://awesome-hd.club/torrents.php?id=\" + element['groupid']+\"&torrentid=\"+ element['id']\n t=open(arguments['--output'],'a')\n ahdlogger.warn(\"writing to file:\",arguments['--output'])\n t.write(link+'\\n')\n if arguments['--torrent']!=None and arguments['--torrent']!=\"\" and arguments['--torrent']!=\"None\":\n link=\"https://awesome-hd.club/torrents.php?action=download&id=\" +element['id'] +\"&torrent_pass=\" + api\n title=re.sub(\": \",\"-\",querytitle)\n name=(f\"[AHD]{title}.{querysource}.{queryresolution}.{querygroup}.torrent\")\n name=re.sub(\"/\", \".\",name)\n torrent=os.path.join(torrentfolder,name)\n ahdlogger.warn(torrent)\n ahdlogger.warn(link)\n\n\n try:\n subprocess.run([wget,'--load-cookies',cookie,link,'-O',torrent])\n except:\n ahdlogger.warn(f\"{title}: Could not find Download-{currentdate}\")\n\ndef get_missing(arguments,files,encode=None):\n\n\n currentdate=datetime.now().strftime(\"%m.%d.%Y_%H%M\")\n\n if encode==None:\n encode=False\n api=arguments['--api']\n output=arguments['--misstxt']\n file=files.get_first()\n\n if file==\"No Files\":\n return\n filesize=files.get_size()\n fileguessit=guessitinfo(file)\n fileguessit.set_values()\n title=fileguessit.get_name().lower()\n if fileguessit.get_season()!=\"\":\n title=title+\": \" + fileguessit.get_season()\n imdb=get_imdb(fileguessit.get_info())\n if imdb==None:\n ahdlogger.warn(f\"{file}: could not find IMDB\")\n\n return\n search = \"https://awesome-hd.club/searchapi.php?action=imdbsearch&passkey=\" + api + \"&imdb=tt\" + imdb\n ahdlogger.warn(f\"Searching For {files.type} with: {search}\")\n\n try:\n response = requests.get(search, timeout=300)\n except:\n ahdlogger.warn(f\"{search}: Could not find Get a response from AHD URL:{files.get_type()}-{currentdate}\")\n return\n try:\n results=xmltodict.parse(response.content)\n except:\n ahdlogger.warn(f\"{title}: Could not find parse AHD XML:{search} {files.get_type()}-{currentdate}\")\n return\n try:\n results['searchresults']['torrent'][1]['name']\n loop=True\n max=len(results['searchresults']['torrent'])\n except KeyError as key:\n if str(key)==\"1\":\n element=results['searchresults']['torrent']\n max=1\n loop=False\n else:\n ahdlogger.warn(f\"{title}:Probably no results\")\n addmissing(output,files,file)\n return\n for i in range(max):\n\n titlematch=False\n group=False\n resolution=False\n source=False\n sizematch=False\n if loop: element = results['searchresults']['torrent'][i]\n querytitle=lower(element['name'])\n if querytitle==None:\n continue\n querygroup=lower(element['releasegroup'])\n if querygroup==None:\n querygroup=\"\"\n queryresolution=element['resolution']\n querysource=lower(element['media'])\n if querysource==\"uhd blu-ray\":\n querysource=\"blu-ray\"\n if querysource==\"web-dl\" or querysource==\"webrip\":\n 
querysource=\"web\"\n queryencoding=element['encoding']\n querysize= int(element['size'])\n\n if querytitle==title:\n titlematch=True\n if querysource==fileguessit.get_source() or fileguessit.get_source()==\"\":\n source=True\n if querygroup==fileguessit.get_group() or re.search(querygroup,fileguessit.get_group(),re.IGNORECASE)!=None \\\n or re.search(fileguessit.get_group(),querygroup,re.IGNORECASE)!=None or fileguessit.get_group()==\"\":\n group=True\n if queryresolution==fileguessit.get_resolution():\n resolution=True\n if difference(querysize,filesize)<.01:\n sizematch=True\n\n\n ahdlogger.debug(f\"Comparison UserTitle:{title} SiteTite:{querytitle} UserSource:{fileguessit.get_source()} SiteSource:{querysource} UserGroup:{fileguessit.get_group()} SiteGroup:{querygroup} UserRes:{fileguessit.get_resolution()} SiteRes:{queryresolution} \\n \")\n if encode is False and source is True and resolution is True:\n return\n if titlematch is True and source is True and group is True and resolution is True \\\n and sizematch is True and filesize!=0:\n return\n\n addmissing(output,files,file)\ndef addmissing(output,files,file):\n ahdlogger.warn(\"Adding Potential Upload to File\")\n output=open(output,\"a+\")\n output.write(\"AHD:\")\n if files.get_dir()==\"0\":\n output.write(\"Directory:Single-File:\")\n output.write(file)\n else:\n output.write(f\"Directory:{files.get_dir()}:\")\n output.write(file)\n output.write('\\n')\n output.close()\n\n\ndef get_imdb(details):\n title = details.get('title')\n ia = IMDb()\n if title==None:\n return title\n for i in range(0,16):\n if i==15:\n return None\n try:\n results = ia.search_movie(title)\n break\n except Exception as e:\n time.sleep(10)\n if len(results) == 0:\n return None\n for movie in results:\n if ((details.get('year')==movie.get('year')) and (movie.get('year')!=None or details.get('year')!=None )):\n return movie.movieID\n else:\n return results[0].movieID\ndef difference(value1,value2):\n dif=abs((value2-value1)/((value1+value2)/2))\n return dif\ndef lower(input):\n if input==None:\n return input\n else:\n input=input.lower()\n return input\ndef createconfig(config):\n configpath=os.path.dirname(os.path.abspath(__file__))+\"/ahd_cross.txt\"\n config.read(configpath)\n\n\n if config.has_section('general') ==False:\n config.add_section('general')\n if config.has_section('grab') ==False:\n config.add_section('grab')\n if config.has_section('scan') ==False:\n config.add_section('scan')\n message_dialog(\n title=\"Config Creator\",\n text=\"Welcome to the Config Creator.\\nA config File is recommended to run this program\\nWe will Start by adding root or Folders to Scan\\nNote You'll need at least one root\\nNote:This will overright ahd_cross.txt if you confirm at the end\",\n ).run()\n\n newroot =True\n root=None\n rootstr=\"\"\n ignorestr=\"\"\n while newroot:\n if root==None:\n root = input_dialog(title='Getting Root Directories ',text='Please Enter the Path to a Root Directory:').run()\n if root==None:\n break\n addstring=\"Adding:\"+root + \" is this Okay? 
\"\n option = button_dialog(\n title=addstring,\n buttons=[(\"Yes\", True), (\"No\", False)],\n ).run()\n if option==False:\n root=None\n pass\n else:\n rootstr=rootstr+root+\",\"\n root=None\n newroot= button_dialog(\n title=\"Add Another Root Folder \",\n buttons=[(\"Yes\", True), (\"No\", False)],\n ).run()\n config.set('scan', \"root\", rootstr)\n\n confirm = button_dialog(\n title=\"Add a Folder or File to ignore \",\n buttons=[(\"Yes\", True), (\"No\", False),(\"Info\", None)],\n ).run()\n while confirm!=False:\n if confirm==None:\n message_dialog(\n title=\"Ignore Folders and Files\",\n text=\"Ignored Directories will not be scanned As a subdirectory of another Root Folder.\\nHowever note that a ignored Folder can still be added as a root .\\nIn that case the subdirectories of the ignore folder would be added\\nIgnored Files will not be added at all\",\n ).run()\n if confirm:\n ignorepath = input_dialog(title='Getting ignore Path ',text='Please Enter the Path to ignore:').run()\n\n if ignorepath==None:\n break\n addstring=\"Adding:\"+ignorepath + \" is this Okay? \"\n option = button_dialog(\n title=addstring,\n buttons=[(\"Yes\", True), (\"No\", False)],\n ).run()\n if addstring==True:\n ignorestr= ignorestr+ignorepath\n confirm = button_dialog(\n title=\"Add Another Folder to ignore \",\n buttons=[(\"Yes\", True), (\"No\", False)],\n ).run()\n\n config.set('scan', \"ignore\", ignorestr)\n\n\n #setup next few options as empty\n config.set('general', \"txt\", \"\")\n config.set('grab', \"api\", \"\")\n config.set('grab', \"cookie\", \"\")\n config.set('grab', \"output\", \"\")\n config.set('general', \"misstxt\", \"\")\n config.set('grab', \"torrent\", \"\")\n\n\n confirm=False\n while confirm==False:\n txtpath = input_dialog(title='Scanner TXT File',text='Please Enter the Path for scanner and grabber.\\nFile Paths will Writen Here and is required ').run()\n if txtpath==None:\n break\n config.set('general', \"txt\", txtpath)\n confirmtxt=\"You entered:\"+txtpath+\" is this Okay?\"\n confirm = button_dialog(\n title=confirmtxt,\n buttons=[(\"Yes\", True), (\"No\", False)],\n ).run()\n confirm=False\n while confirm==False:\n torrent = input_dialog(title='Torrent Folder',text='Please Enter the Path for downloading Torrents\\nIf you leave this blank make sure to set Output\\nThat step will come up later in this setup\\nIt is okay to setup Both Torrent and Output\\nHowever if None are selected then Nothing will happen when Downloader finds a match').run()\n if torrent==None:\n break\n config.set('grab', \"torrent\", torrent)\n confirmtxt=\"You entered:\"+torrent+\" is this Okay?\"\n confirm = button_dialog(\n title=confirmtxt,\n buttons=[(\"Yes\", True), (\"No\", False)],\n ).run()\n\n\n\n confirm=False\n while confirm==False:\n key = input_dialog(title='AHD KEY',text='Please Enter your AHD passkey.\\n This will be used to Download Torrent Files and Scan AHD\\nThis is Required').run()\n if key==None:\n break\n config.set('grab', \"api\", key)\n confirmtxt=\"You entered:\"+key+\" is this Okay?\"\n confirm = button_dialog(\n title=confirmtxt,\n buttons=[(\"Yes\", True), (\"No\", False)],\n ).run()\n confirm=False\n while confirm==False:\n cookie = input_dialog(title='Cookie',text='You Will need a Cookie File For Downloading\\n[cookies.txt by Lennon Hill and Get cookies.txt are good options for exporting\\nfrom browser]\\nFile should be in .txt and not a json. 
Paste the path Here\\nPress Cancel to Leave Blank\\nThis is Required if you want to Download').run()\n if cookie==None:\n break\n config.set('grab', \"cookie\", cookie)\n confirmtxt=\"You entered:\"+cookie+\" is this Okay?\"\n confirm = button_dialog(\n title=confirmtxt,\n buttons=[(\"Yes\", True), (\"No\", False)],\n ).run()\n\n confirm= button_dialog(\n title=\"Do you want to Exclude Certain Sources\\nFor example all blu-ray encodes,etc\\nThese will be ignored during grabbing/matching\\nNote: Other are Files that don't fit in other selectors\\nPress Cancel to Leave Blank\",\n buttons=[(\"Yes\", True), (\"No\", False)],\n ).run()\n excludestr=\"\"\n if confirm:\n exclude= checkboxlist_dialog(\n values=[\n (\"remux\", \"Remux\"),\n (\"blu\", \"Blu-Ray Encode\"),\n (\"tv\", \"HDTV\"),\n (\"web\", \"WEB\"),\n (\"other\", \"Other\"),\n ],\n title=\"Exclude\",\n text=\"Pick the Source Types you would like to ignore \",\n ).run()\n\n for type in exclude:\n excludestr=excludestr+type+\",\"\n config.set('grab', \"exclude\", excludestr)\n\n\n confirm=False\n while confirm==False:\n outpath = input_dialog(title='Download Links Output TXT',text='Please Enter a path for Writing Matched Links to.\\nWith This Every Time a Match is found a download url will be written here\\nPress Cancel to Leave Blank').run()\n if txtpath==None:\n break\n config.set('grab', \"output\", outpath)\n confirmtxt=\"You entered:\"+outpath+\" is this Okay?\"\n confirm = button_dialog(\n title=confirmtxt,\n buttons=[(\"Yes\", True), (\"No\", False)],\n ).run()\n\n confirm=False\n while confirm==False:\n missingpath = input_dialog(title='Missing Files Output TXT',text='Please Enter a path for Writing Potential Missing Files.\\nDuring a \"Missing Scan\" Every File is Compared to AHD Libary if the Slot is not already filled or your file is a encode.\\nThe Path will be written to this TXT File\\nThis is Required if you want to Find Files to upload').run()\n if txtpath==None:\n break\n config.set('general', \"misstxt\", missingpath)\n confirmtxt=\"You entered:\"+outpath+\" is this Okay?\"\n confirm = button_dialog(\n title=confirmtxt,\n buttons=[(\"Yes\", True), (\"No\", False)],\n ).run()\n\n\n\n\n\n\n fd=\"\"\n confirm=False\n while confirm==False:\n fd = input_dialog(title='FD' ,text='FD is required for Program\\nDownloads Can be found here https://github.com/sharkdp/fd/releases\\nBy Default the program comes with a version of fd for your OS\\nIf you want to use your own binary, you can enter your choice here \\nPress Cancel to use the Default ').run()\n if txtpath==None:\n break\n config.set('general', \"fd\", fd)\n confirmtxt=\"You entered:\"+fd+\" is this Okay?\"\n confirm = button_dialog(\n title=confirmtxt,\n buttons=[(\"Yes\", True), (\"No\", False)],\n ).run()\n wget=\"\"\n confirm=False\n while confirm==False:\n wget = input_dialog(title='WGET' ,text='WGET is required for Program\\nLinux comes with this Preinstalled usually for windows:https://eternallybored.org/misc/wget/\\nBy Default the program comes with a version of wget for Windows\\nIf you want to use your own binary, you can enter your choice here \\nPress Cancel to use the Default ').run()\n if txtpath==None:\n break\n config.set('general', \"wget\", wget)\n confirmtxt=\"You entered:\"+wget+ \" is this Okay?\"\n confirm = button_dialog(\n title=confirmtxt,\n buttons=[(\"Yes\", True), (\"No\", False)],\n ).run()\n\n\n\n\n\n\n\n\n sections = config.sections()\n config_string=\"\"\n for section in sections:\n options = config.options(section)\n for option in 
options:\n temp_dict={}\n temp_dict[option] = config.get(section,option)\n config_string=config_string+str(temp_dict)+\"\\n\"\n\n\n\n\n\n\n txt=\"These are the Options that will be written to the configfile\\nPlease Confirm if you want to save these Options\\n Current File wil be overwritten\\n\\n\"+config_string\n\n\n\n option = button_dialog(\n title=\"Confirm Options\",\n text=txt,\n buttons=[(\"Yes\", True), (\"No\", False)],\n ).run()\n if option==False:\n return\n with open(configpath, 'w') as configfile:\n print(\"Writing to configfile\")\n config.write(configfile)\n" }, { "alpha_fraction": 0.4727272689342499, "alphanum_fraction": 0.6969696879386902, "avg_line_length": 15.5, "blob_id": "2b5b96aaabafa176a4ba034948e0308419755a57", "content_id": "324d0e1110e5dbb5ba747f646486f42e01f2e666", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 330, "license_type": "no_license", "max_line_length": 22, "num_lines": 20, "path": "/requirements.txt", "repo_name": "excludedBittern8/ahd_cross_seed", "src_encoding": "UTF-8", "text": "babelfish==0.5.5\ncertifi==2020.12.5\nchardet==4.0.0\nconfigparser==5.0.1\nDateTime==4.3\ndocopt==0.6.2\nguessit==3.1.1\nidna==2.10\npathlib==1.0.1\nprompt-toolkit==3.0.8\npython-dateutil==2.8.1\npytz==2020.4\nrebulk==2.0.1\nrequests==2.25.1\nsix==1.15.0\nurllib3==1.26.2\nwcwidth==0.2.5\nzope.interface==5.2.0\nIMDbPY==2020.9.25\nxmltodict==0.12.0\n" } ]
2
loopinf/Hayman1stSystem
https://github.com/loopinf/Hayman1stSystem
0dfbf7dfd494b7621215a7193a9ca8b93d90799d
472a825abdee03216730093d2a8d0cf4296685f7
bfad75ec0ad2e763f0f7c58239ce7581284b8ce6
refs/heads/master
2021-09-01T06:57:46.223447
2017-12-25T14:31:46
2017-12-25T14:31:46
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6009762287139893, "alphanum_fraction": 0.6409395933151245, "avg_line_length": 32.89361572265625, "blob_id": "9bc496c821f95679387cbc06f5752b851b1ff41d", "content_id": "7cf2f6528c9490ca5254130de4b7704c3db22914", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3712, "license_type": "no_license", "max_line_length": 76, "num_lines": 94, "path": "/CallBuyAndSell.py", "repo_name": "loopinf/Hayman1stSystem", "src_encoding": "UTF-8", "text": "#조건에 따른 매도 매수를 시행하는 모듈입니다.\r\nimport win32com.client\r\n\r\ndef CybosConnection():\r\n objCpCybos = win32com.client.Dispatch(\"CpUtil.CpCybos\")\r\n bConnect = objCpCybos.IsConnect\r\n if (bConnect == 0):\r\n print(\"PLUS가 정상적으로 연결되지 않음. \")\r\n exit()\r\n\r\ndef ChoiseToAmountToBuy(AccountRatio,NowPrice,flag):\r\n objTrade = win32com.client.Dispatch(\"CpTrade.CpTdUtil\")\r\n objTrade.TradeInit(0)\r\n AccountNumber = objTrade.AccountNumber[0] # 계좌번호\r\n objTrade5331A = win32com.client.Dispatch(\"CpTrade.CpTdNew5331A\")\r\n\r\n objTrade5331A.SetInputValue(0, AccountNumber)\r\n objTrade5331A.SetInputValue(1, \"01\")\r\n objTrade5331A.BlockRequest()\r\n\r\n Amount = objTrade5331A.GetHeaderValue(45)\r\n StockRatio = int(Amount * AccountRatio)\r\n StockAmount = int(StockRatio / NowPrice - 1)\r\n \r\n if(flag == 0):\r\n print('계좌 :', AccountNumber)\r\n print('현재 잔고 :', Amount)\r\n print('주문금액 :', StockRatio)\r\n print('주문 수량 :', StockAmount)\r\n\r\n elif(flag == 1):\r\n return StockAmount\r\n\r\n else:\r\n print('잘못된 Flag입니다!')\r\n \r\ndef Buy(Code, HowMuchBuy): \r\n \r\n # 주문 초기화\r\n objTrade = win32com.client.Dispatch(\"CpTrade.CpTdUtil\")\r\n initCheck = objTrade.TradeInit(0)\r\n \r\n # 주식 매수 주문\r\n AccountNumber = objTrade.AccountNumber[0] # 계좌번호\r\n accFlag = objTrade.GoodsList(AccountNumber, 1) # 주식상품 구분\r\n print(AccountNumber, accFlag[0])\r\n objStockOrder = win32com.client.Dispatch(\"CpTrade.CpTd0311\")\r\n objStockOrder.SetInputValue(0, \"2\") # 2: 매수\r\n objStockOrder.SetInputValue(1, AccountNumber) # 계좌번호\r\n objStockOrder.SetInputValue(2, accFlag[0]) # 상품구분 - 주식 상품 중 첫번째\r\n objStockOrder.SetInputValue(3, Code) # 종목코드 - 입력받은 코드\r\n objStockOrder.SetInputValue(4, int(HowMuchBuy)) # 매수수량 200주\r\n objStockOrder.SetInputValue(5, 14100) # 주문단가 - 14,100원\r\n objStockOrder.SetInputValue(7, \"0\")\r\n objStockOrder.SetInputValue(8, \"03\") # 03 시장가로 주문합니다. 
\r\n \r\n # 매수 주문 요청\r\n objStockOrder.BlockRequest()\r\n \r\n rqStatus = objStockOrder.GetDibStatus()\r\n rqRet = objStockOrder.GetDibMsg1()\r\n print(\"통신상태\", rqStatus, rqRet)\r\n if rqStatus != 0:\r\n exit()\r\n\r\ndef Sell(Code, HowMuchBuy):\r\n objTrade = win32com.client.Dispatch(\"CpTrade.CpTdUtil\")\r\n initCheck = objTrade.TradeInit(0)\r\n if (initCheck != 0):\r\n print(\"주문 초기화 실패\")\r\n exit()\r\n\r\n # 주식 매도 주문\r\n acc = objTrade.AccountNumber[0] #계좌번호\r\n accFlag = objTrade.GoodsList(acc, 1) # 주식상품 구분\r\n print(acc, accFlag[0])\r\n objStockOrder = win32com.client.Dispatch(\"CpTrade.CpTd0311\")\r\n objStockOrder.SetInputValue(0, \"1\") # 1: 매도\r\n objStockOrder.SetInputValue(1, acc ) # 계좌번호\r\n objStockOrder.SetInputValue(2, accFlag[0]) # 상품구분 - 주식 상품 중 첫번째\r\n objStockOrder.SetInputValue(3, Code) # 종목코드 - 입력받은 코드\r\n objStockOrder.SetInputValue(4, HowMuchBuy) # 매도수량 10주\r\n objStockOrder.SetInputValue(5, 14100) # 주문단가 - 14,100원\r\n objStockOrder.SetInputValue(7, \"0\") # 주문 조건 구분 코드, 0: 기본 1: IOC 2:FOK\r\n objStockOrder.SetInputValue(8, \"03\") # 시장가\r\n \r\n # 매도 주문 요청\r\n objStockOrder.BlockRequest()\r\n \r\n rqStatus = objStockOrder.GetDibStatus()\r\n rqRet = objStockOrder.GetDibMsg1()\r\n print(\"통신상태\", rqStatus, rqRet)\r\n if rqStatus != 0:\r\n exit()" } ]
1
Miguel245/code_can
https://github.com/Miguel245/code_can
1011926f246d4dae610397176727151b0a52b9b4
079183b2df6c610d2470d133389fa53806d2a744
579899074d5aefc416cf0253e57573596f47e83e
refs/heads/master
2020-05-17T12:50:59.436072
2014-04-04T13:21:58
2014-04-04T13:21:58
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7275148630142212, "alphanum_fraction": 0.7305880188941956, "avg_line_length": 25.813186645507812, "blob_id": "88697332d41b62ec6776cfa795290a9afb67bf5d", "content_id": "cb0674e07ad546a4939f59cabee919a4ad747e31", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Ruby", "length_bytes": 4881, "license_type": "no_license", "max_line_length": 76, "num_lines": 182, "path": "/ruby_cucumber/realestate_steps.rb", "repo_name": "Miguel245/code_can", "src_encoding": "UTF-8", "text": "#features/step_definitions/realestate_steps.rb\n#This script navigates through all the main header links of the \n#realestate website\n#require 'realestate'\nrequire \"rubygems\"\nrequire \"selenium-webdriver\"\n\n#Firefox browser instantiation\ndriver = Selenium::WebDriver.for :firefox\n\n#Loading realestate URL\nGiven (/^I'm at the main page$/) do\n\tdriver.navigate.to \"http://www.realestate.com.au\"\nend\n\n#Clicking on the rent link present on the realestate home page\nWhen (/^I click on the rent link$/) do\n\tRentButton = driver.find_element(:link, \"Rent\")\n\tRentButton.click\n\tsleep 1\nend\n\n#Verify I'm on the actual rent page\nThen (/^I should be at the rent page$/) do\n\tRentText = driver.find_element(:id, \"rent\")\nend\n\t\n\t\n#Clicking on the invest link present on the realestate home page\nWhen (/^I click on the invest link$/) do\n\tInvestButton = driver.find_element(:link, \"Invest\")\n\tInvestButton.click\n\tsleep 2\nend\n\n#verify I\"m on the actual invest page\nThen (/^I should be at the invest page$/) do\n\tInvestText = driver.find_element(:id, \"invest\")\nend\n\n\n#Clicking on the sold link present on the realestate home page\nWhen (/^I click on the sold link$/) do\n\tSoldButton = driver.find_element(:link, \"Sold\")\n\tSoldButton.click\n\tsleep 4\nend\n\n#verify I'm on the actual sold page\nThen (/^I should be at the sold page$/) do\n\tSoldText = driver.find_element(:id, \"sold\")\nend\n\n\n#Clicking on the share link present on the realestate home page\nWhen (/^I click on the share link$/) do\n\tShareButton = driver.find_element(:link, \"Share\")\n\tShareButton.click\n\tsleep 2\nend\n\n#verify I'm on the actual share page\nThen (/^I should be at the share page$/) do\n\tShareText = driver.find_element(:id, \"share\")\nend\n\n#Clicking on the New homes link present on the realestate home page\nWhen (/^I click on the new homes link$/) do\n\tNewHomeButton = driver.find_element(:link, \"New homes\")\n\tNewHomeButton.click\n\tsleep 2\nend\n\n#verify I'm on the actual new homes page\nThen (/^I should be at the new homes page$/) do\n\tHomesText = driver.find_element(:id, \"new homes\")\nend\n\n#Clicking on the retire link present on the realestate home page\nWhen (/^I click on the retire link$/) do\n\tRetireButton = driver.find_element(:link, \"Retire\")\n\tRetireButton.click\n\tsleep 2\nend\n\n#verify I'm on the actual retire page\nThen (/^I should be at the retire page$/) do\n\tRetireText = driver.find_element(:id, \"retire\")\nend\n\n#Clicking on the find agents link present on the realestate home page\nWhen (/^I click on the find agents link$/) do\n\tFindAgentsButton = driver.find_element(:link, \"Find agents\")\n\tFindAgentsButton.click\n\tsleep 2\nend\n\n#verify I'm on the actual agents page\nThen (/^I should be at the find agents page$/) do\n\tAgentsText = driver.find_element(:id, \"find agents\")\nend\n\n\t\n#Clicking on the Home ideas link present on the realestate home page\nWhen (/^I click on the home ideas link$/) do\n\tHomeIdeasButton = 
driver.find_element(:link, \"Home ideas\")\n\tHomeIdeasButton.click\n\tsleep 1\nend\n\n#verify I'm on the actual home ideas page\nThen (/^I should be at the home ideas page$/) do\n\tHomeText = driver.find_element(:id, \"home ideas\")\nend\n\t\n\t\n#Clicking on the blog link present on the realestate home page\nWhen (/^I click on the blog link$/) do\n\tBlogButton = driver.find_element(:link, \"Blog\")\n\tBlogButton.click\n\tsleep 2\nend\n\n#verify I'm on the actual blog page\nThen (/^I should be at the blog page$/) do\n\tBlogText = driver.find_element(:id, \"blog\")\nend\n\n#Clicking on the commercial link present on the realestate home page\nWhen (/^I click on the commercial link$/) do\n\tCommercialButton = driver.find_element(:link, \"Commercial\")\n\tCommercialButton.click\n\tsleep 2\nend\n\n#verify I'm on the actual commercial page\nThen (/^I should be at the commercial page$/) do\n\tCommercialText = driver.find_element(:id, \"commercial\")\nend\n\n\n#Clicking on the sign in link present on the realestate home page\nWhen (/^I click on the sign in link$/) do\n\tSignInButton = driver.find_element(:link, \"Sign In\")\n\tSignInButton.click\n\tsleep 5\nend\n\n#verify I'm at the actual sign in page\nThen (/^I should be at the sign in page$/) do\n\tSignText = driver.find_element(:id, \"sign in\")\n\tdriver.navigate.to \"http://www.realestate.com.au\"\n\tsleep 5\nend\n\n\t\n#Clicking on the Join link present on the realestate home page\nWhen (/^I click on the join link$/) do\n\tJoinButton = driver.find_element(:link, \"Join\")\n\tJoinButton.click\n\tsleep 5\nend\n\n#verify Im at the actual join page\nThen (/^I should be at the join page$/) do\n\tJoinText = driver.find_element(:id, \"join\")\n\tdriver.navigate.to \"http://www.realestate.com.au\"\n\tsleep 5\nend\n\n\t\n#Clicking on the buy link (landing page) present on the realestate home page\nWhen (/^I click on the landing page$/) do\n\tLandingPageButton = driver.find_element(:link, \"Buy\")\n\tLandingPageButton.click\n\tsleep 2\nend\n\n#verify I'm at the actual landing page\nThen (/^I should be at the buy page$/) do\n\tBuyText = driver.find_element(:id, \"buy\")\nend\n\n" }, { "alpha_fraction": 0.7293790578842163, "alphanum_fraction": 0.7367933392524719, "avg_line_length": 33.269840240478516, "blob_id": "ebd9d6a3ce0d7e59f3f3652d56b1d5f967c7dd63", "content_id": "a679ad5ba733d2373e95e6232dbe7998e79887e8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2158, "license_type": "no_license", "max_line_length": 84, "num_lines": 63, "path": "/python_lettuce/mail_api.py", "repo_name": "Miguel245/code_can", "src_encoding": "UTF-8", "text": "#This python/lettuce script, launches firefox browser at the target\n#site and opens the mail api to enter all the required values in \n#order to send email successfully. 
\nfrom lettuce import *\nfrom selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\nimport time\n\ndriver = webdriver.Firefox()\ndriver.get(\"http://sendgrid.com/docs/api_workshop.html\")\n\ntime.sleep(5)\n\n# Click Mail link to expand Mail properties\n@step('I am at the mail api')\ndef at_the_mail_api(step):\n\tdriver.find_element_by_xpath(\"//html/body/div/ul/li[6]/h3/div\").click()\n\n#Enter parameter values\n# value for: to (recipient)\n@step('I enter mail properties')\ndef enter_mail_properties(step):\n\tmailprop = driver.find_element_by_name(\"params[to]\")\n\tmailprop.send_keys(\"[email protected]\")\n\t#value for: toname\n\tmailprop = driver.find_element_by_name(\"params[toname]\")\n\tmailprop.send_keys(\"Joe\")\n\t#value for: x-smtpapi\n\tmailprop = driver.find_element_by_name(\"params[x-smtpapi]\")\n\tmailprop.send_keys(\"'content-type': 'application/json'\")\n\t#value for: from\n\tmailprop = driver.find_element_by_name(\"params[from]\")\n\tmailprop.send_keys(\"[email protected]\")\n\t#value for: fromname\n\tmailprop = driver.find_element_by_name(\"params[fromname]\")\n\tmailprop.send_keys(\"Miguel\")\n\t#value for: subject\n\tmailprop = driver.find_element_by_name(\"params[subject]\")\n\tmailprop.send_keys(\"Soccer practice\")\n\t#value for: text\n\tmailprop = driver.find_element_by_name(\"params[text]\")\n\tmailprop.send_keys(\"Soccer camp and practice have been moved to the west park\")\n\t#value for: html\n\tmailprop = driver.find_element_by_name(\"params[html]\")\n\tmailprop.send_keys(\"<param name=autoplay value=true\")\n\t#value for: bcc\n\tmailprop = driver.find_element_by_name(\"params[bcc]\")\n\tmailprop.send_keys(\"[email protected]\")\n\t#value for: date\n\tmailprop = driver.find_element_by_name(\"params[date]\")\n\tmailprop.send_keys(\"Thur,04 April 2014 08:00:00 GMT\")\n\t#value for: headers\n\tmailprop = driver.find_element_by_name(\"params[headers]\")\n\tmailprop.send_keys(\"application/json\")\n\n#click the 'Try it' link for the Mail endpoint to send the mail\n@step('I can send mail')\ndef can_send_mail(step):\n\tdriver.find_element_by_link_text(\"Mail\").click()\n\ntime.sleep(5)\n\ndriver.close()" }, { "alpha_fraction": 0.722806990146637, "alphanum_fraction": 0.7438596487045288, "avg_line_length": 29.464284896850586, "blob_id": "0d8ace774020ef305f7dc235078849ff302bd239", "content_id": "efc00e59f16f86ec02293ee2035a0a064079af7c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Ruby", "length_bytes": 855, "license_type": "no_license", "max_line_length": 62, "num_lines": 28, "path": "/ruby_cucumber/realestate_search_steps.rb", "repo_name": "Miguel245/code_can", "src_encoding": "UTF-8", "text": "# this script performs a property search\n# matching a specific criteria (Richmond, VIC, flat, $500,000)\nrequire \"rubygems\"\nrequire \"selenium-webdriver\"\n\n#Firefox browser instantiation\ndriver = Selenium::WebDriver.for :firefox\n\n#Loading realestate URL\nGiven (/^I'm at the search page$/) do\n\tdriver.navigate.to \"http://www.realestate.com.au\"\n\tSearchButton1 = driver.find_element(:id, \"where\")\nend\n\nWhen (/^I search for richmond vic property$/) do\n\tSearchButton = driver.find_element(:id, \"where\")\n\tSearchButton.send_keys \"Richmond, VIC\"\n\t# WebDriver has no driver.select; use the Support::Select helper on the dropdown\n\tSelenium::WebDriver::Support::Select.new(driver.find_element(:id, \"maxPrice\")).select_by(:text, \"500,000\")\n\tsleep 4\nend\n\nThen (/^I should see results$/) do\n\tSearch1Button = driver.find_element(:id, \"searchBtn\")\n\tSearch1Button.click\n\t#verify the search result matches\n\tSearch1Button = driver.find_element(:id, \"suburbLink\")\n\tSearch1Button = driver.find_element(:partial_link_text, \"Richmond\")\nend\n\n\n" } ]
3
banana6742/webscraping
https://github.com/banana6742/webscraping
bdf11cfc28cae0b33aa2d93ac491211aae0a38ce
edb2fea4a3572fd3f25bfec9bec0a0f242756fbc
d0ad12e690f5ae92926ad232171e96bf54a26558
refs/heads/master
2020-06-18T07:37:00.527361
2019-07-12T09:45:25
2019-07-12T09:45:25
196,217,290
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5639716386795044, "alphanum_fraction": 0.6669503450393677, "avg_line_length": 13.383673667907715, "blob_id": "8fb73c6f293f4d45460d26d4baae7c7ce5cc6fb5", "content_id": "e78c3275d7f2c53590df44db13839681afe336d2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3865, "license_type": "no_license", "max_line_length": 85, "num_lines": 245, "path": "/ML2.py", "repo_name": "banana6742/webscraping", "src_encoding": "UTF-8", "text": "import numpy as np\nTwoD = np.array([[2,3,8],[3,4,6],[2,4,6]])\nprint(TwoD)\nprint(TwoD[0][0])\nprint(TwoD[2][1])\nprint(TwoD[2,2])\n\nprint(TwoD[:2])\nprint(TwoD[:2,0:1])\n\nprint('-'*50)\n\nnew = np.arange(0,10)\n# 0~5まで出す\nslice_of_array=new[0:6]\nprint(slice_of_array)\n\n# 0~5まで99に変更\nslice_of_array[:]=99\nprint(slice_of_array)\n\nnew2 = new.copy()\nprint(new2)\n\n\nlohit=np.arange(1,11)\nprint(lohit)\n\nbool_arr=lohit>4\nprint(bool_arr)\n\nlohit[bool_arr]\n \narr = np.arange(25)\nranarr=np.random.randint(0,50,10)\nprint(ranarr)\nprint(arr)\nprint(arr[arr<5])\n\nfrom numpy.random import randint\n# 3~22までランダムに出す\nprint(randint(3,22))\n\n# 40個の行列、4行10列\narr_2d=np.arange(40).reshape(4,10)\nprint(arr_2d)\n\nprint('-'*50)\n\nimport numpy as np\n# 1行の4足演算\nnew_array = np.arange(0,11)\nprint(new_array)\nprint(new_array+new_array)\nprint(new_array-new_array)\nprint(new_array*new_array)\nprint(new_array/new_array)\n\nnew=new/12\nprint(new)\n\n\nnew_array=np.arange(1,10)\nnew=new_array+new_array\nprint(new)\nn=new/10\nprint(n)\nprint('-'*50)\n\n\n# 2次元の行列計算\nimport numpy as np\nTwoD = np.array([[2,3,8],[3,4,6],[2,4,6]])\nprint(TwoD)\nprint(TwoD[0][0])\nprint(TwoD[2][1])\nprint(TwoD[2,2])\n\nprint(TwoD[:2])\nprint(TwoD[:2,0:1])\n\nprint('-'*50)\n\n\n\nnew = np.arange(0,10)\nslice_of_array=new[0:6]\nprint(slice_of_array)\n\nslice_of_array[:]=99\nprint(slice_of_array)\n\nnew2 = new.copy()\nprint(new2)\n\nlohit=np.arange(1,11)\nprint(lohit)\n\n# 4より大きい数のTF確認\nbool_arr=lohit>4\nprint(bool_arr)\n\n\n\n\nlohit[bool_arr]\n\n\narr = np.arange(25)\nranarr=np.random.randint(0,50,10)\nranarr\nprint('ranarr: ', ranarr)\nprint(arr)\n\n\n\n\nprint('-'*50)\narr[arr<5]\n\n#3~22の数字をランダムに出す\nfrom numpy.random import randint\nrandint(3,22)\n\n# 40個の数字の4行10列表示\narr_2d=np.arange(40).reshape(4,10)\narr_2d\n\nprint('-'*50)\n\nimport numpy as np\n# 1行の配列同士の四則演算\nnew_array = np.arange(0,11)\nprint(new_array)\nprint(new_array+new_array)\nprint(new_array-new_array)\nprint(new_array*new_array)\nprint(new_array/new_array)\nnew=new_array+1000\nprint(new)\n\nnew = new_array*10\nnew\n\nnew=new/12\nnew\n\nnew_array=np.arange(1,10)\nnew=new_array+new_array\nnew\nn=new/10\nn\n\nok=np.arange(3,10)\nok\n\nnp.sqrt(ok)\nprint('np.sqrt(ok): ', np.sqrt(ok))\nnp.max(ok)\nprint('np.max(ok: ', np.max(ok)\nnp.min(ok)\nprint('np.min(ok: ', np.min(ok)\nnp.sin(ok)\nprint('np.sin(ok): ', np.sin(ok))\nnp.log(ok)\nprint('np.log(ok): ', np.log(ok))\n\n\nlog_number=np.arange(4,10)\nprint(np.log(log_number))\n\n\n\nn=np.zeros(10)\nprint(n)\n\nprint(n+5)\n\nprint(np.ones(10)*5)\n\nN= (np.arange(100).reshape(10,10)+1)/100\nprint(N)\n\nprint(np.arange(1,101).reshape(10,10)/100)\n\n# 0~1の数字を20等分して計算\nprint(np.linspace(0,1,20))\n# 10~50の数字を並べる\nprint(np.arange(10,51))\n\n# 3行3列の計算\nprint(np.arange(9).reshape(3,3))\n\nok=np.eye(3)\nprint(ok)\n\n\nprint('np.random.rand(): ', np.random.rand())\n# 0~50の数字を200等分して計算\nnp.linspace(0,50,200)\nprint('np.linspace(0,50,200): ', np.linspace(0,50,200))\n# 
正規分布を25個適当に配列\nnp.random.randn(25)\nprint('np.random.randn(25): ', np.random.randn(25))\n\n# 10~50の数字を2個おきに配列、偶数の数字の配列、奇数の場合は+1\nnp.arange(10,51,2)\nprint('np.arange(10,51,2): ', np.arange(10,51,2))\n\n# 5行5列の25個の数字を配列\nn = np.arange(25).reshape(5,5)+1\nprint(n)\n\nnp.arange(1,26).reshape(5,5)\nprint('np.arange(1,26).reshape(5,5):\\n ', np.arange(1,26).reshape(5,5))\n\n\nprint('-'*70)\nprint(n[2:,1:])\n\nnp.sum(np.arange(1,26).reshape(5,5))\nprint('np.sum(np.arange(1,26).reshape(5,5)): ', np.sum(np.arange(1,26).reshape(5,5)))\n\nprint(n[4:5])\n\nprint(n[3:5])\n\n\n\n\n\n\nnp.array([[12,13,14,15],[17,18,19,20],[22,23,24,25]])\n\n#行列から抽出\nnp.array(n[0:3,1:2])\nprint('np.array(n[0:3,1:2]): ', np.array(n[0:3,1:2]))\n\n\n\n\nnp.sqrt(52)\nprint('np.sqrt(52): ', np.sqrt(52))\n\nx\n\n" }, { "alpha_fraction": 0.5008422136306763, "alphanum_fraction": 0.5626052618026733, "avg_line_length": 17.93617057800293, "blob_id": "11d27bbbb8e3ef78491193d16fedf1d6d56dc53c", "content_id": "10b13f7af26a780ad91157aabaad55ea9e3dbbe2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1823, "license_type": "no_license", "max_line_length": 65, "num_lines": 94, "path": "/ML5.py", "repo_name": "banana6742/webscraping", "src_encoding": "UTF-8", "text": "import pandas as pd\n\nleft = pd.DataFrame({'A':['A0','A1','A2'],\n 'B':['B0','B1','B2']},\n index=['K0','K1','K2'])\n\nright = pd.DataFrame({'C':['C0','C1','C2'],\n 'D':['D0','D1','D2']},\n index=['K0','K2','K3'])\n\nprint(left.join(right))\n\nprint('-'*80)\nprint(left.join(right,how='outer'))\n\nimport numpy as np\nimport pandas as pd\n\ndf = pd.DataFrame({'First':[100,200,300,400],\n 'Second':[10,20,20,100],\n 'Three':['abc','good','bad','student'],\n 'Four':['FB','Amazon','Microsoft','FlipKart']})\n\nprint(df)\nprint('-'*70)\nprint('-'*70)\n\n#unique values in data\nprint(df['Second'].unique)\nprint('-'*70)\n\n#get the length of the data\nprint(df['First'].nunique())\nprint('-'*70)\n\nprint(len(df['First'].unique()))\nprint('-'*70)\n\nprint(df['Second'].get_values())\n\n#we can add the second column\n\nprint(df['Second'].sum())\nprint('-'*70)\nprint(df['Second'].value_counts())\nprint('-'*70)\n\n#condition selection statements\n#true false\n\nprint(df['First']>100)\nprint('-'*70)\nprint('-'*70)\nprint('-'*70)\n\n\nprint(df[df['First']>100])\nprint('-'*70)\n# かつ\nprint(df[(df['First']>0)&(df['First']==100)])\nprint('-'*70)\n# Second列が1000以上、またはFour列がAmazonを抽出\nprint(df[(df['Second']>1000)|(df['Four']==\"Amazon\")])\nprint('-'*70)\nprint('-'*70)\nprint('-'*70)\n\n# First列の2乗を行う\ndef susumu(x):\n return x**2\n\nprint(df['First'].apply(susumu))\nprint('-'*70)\n\nprint(df['Four'].apply(len))\nprint('-'*70)\nprint('-'*70)\n\nprint(df['First'].apply(lambda x:x*2))\nprint('-'*70)\n\n#dropping the column\nprint(df.drop('First',axis=1))\n\n#print(df.drop('First',axis=1,inplace=True)\nprint('-'*70)\n\nprint(df.index)\nprint('-'*60)\nprint(df.sort_values(by=\"Three\"))\nprint('-'*70)\n\nprint(df.isnull())\nprint('-'*70)\n\n" }, { "alpha_fraction": 0.6651446223258972, "alphanum_fraction": 0.6818873882293701, "avg_line_length": 20.933332443237305, "blob_id": "55c8ce6d2e2078998a0f13ffeb88a0d1c241473a", "content_id": "85656b4aac13270f152ad555232a6978e722fce0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 657, "license_type": "no_license", "max_line_length": 80, "num_lines": 30, "path": "/excel/ML6-excelcsv.py", "repo_name": "banana6742/webscraping", "src_encoding": 
"UTF-8", "text": "import pandas as pd\n\nprint(pd.read_csv('example.csv'))\n# to write\nprint('-'*60)\ndf=pd.read_csv('example.csv')\nprint(df)\n\nprint('-'*40)\n# df.to_csv('My_output')\n# df.to_csv('My_output.csv')\n# print(pd.read_csv('My_output.csv'))\n\n# read= pd.read_excel('Book2.xlsx',sheet_name='2011census')\n# print(read)\n# df.to_excel('example.xlsx',sheet_name='NewSheet')\n\ndata = pd.read_html('https://www.fdic.gov/bank/individual/failed/banklist.html')\n\n# print(type(data))\n\nprint(data[0].head())\n\n# print(df[0])\n\nfrom sqlalchemy import create_engine\nengine = create_engine('sqlite:///:memory:')\ndf.to_sql('data',engine)\nsql_df = pd.read_sql('data',con=engine)\nprint(sql_df)" }, { "alpha_fraction": 0.4255121052265167, "alphanum_fraction": 0.559124767780304, "avg_line_length": 5.44444465637207, "blob_id": "d0aa54ad489f2192a05c6f816e59ace65068ef71", "content_id": "c21abc4d7a4e4ce7fa45030e9c8eb8eb8708df71", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2148, "license_type": "no_license", "max_line_length": 53, "num_lines": 333, "path": "/2ML.py", "repo_name": "banana6742/webscraping", "src_encoding": "UTF-8", "text": "\n# coding: utf-8\n\n# In[2]:\n\n\nimport numpy as np\nTwoD = np.array([[2,3,8],[3,4,6],[2,4,6]])\nprint(TwoD)\nprint(TwoD[0][0])\nprint(TwoD[2][1])\nprint(TwoD[2,2])\n\nprint(TwoD[:2])\nprint(TwoD[:2,0:1])\n\n\n# In[3]:\n\n\nnew = np.arange(0,10)\nslice_of_array=new[0:6]\nprint(slice_of_array)\n\nslice_of_array[:]=99\nprint(slice_of_array)\n\nnew2 = new.copy()\nprint(new2)\n\n\n# In[5]:\n\n\nlohit=np.arange(1,11)\nlohit\n\nbool_arr=lohit>4\nbool_arr\n\n\n# In[10]:\n\n\nlohit[bool_arr]\n\n\n# In[11]:\n\n\narr = np.arange(25)\nranarr=np.random.randint(0,50,10)\nranarr\narr\n\n\n# In[12]:\n\n\narr[arr<5]\n\n\n# In[13]:\n\n\nfrom numpy.random import randint\n\n\n# In[14]:\n\n\nrandint(3,22)\n\n\n# In[15]:\n\n\narr_2d=np.arange(40).reshape(4,10)\narr_2d\n\n\n# In[16]:\n\n\nimport numpy as np\nnew_array = np.arange(0,11)\nnew_array\n\n\n# In[17]:\n\n\nnew_array+new_array\n\n\n# In[18]:\n\n\nnew_array-new_array\n\n\n# In[19]:\n\n\nnew_array*new_array\n\n\n# In[20]:\n\n\nnew_array/new_array\n\n\n# In[21]:\n\n\nnew=new_array+1000\nnew\n\n\n# In[22]:\n\n\nnew = new_array*10\nnew\n\n\n# In[23]:\n\n\nnew=new/12\nnew\n\n\n# In[25]:\n\n\nnew_array=np.arange(1,10)\nnew=new_array+new_array\nnew\nn=new/10\nn\n\n\n# In[26]:\n\n\nok=np.arange(3,10)\nok\n\n\n# In[27]:\n\n\nnp.sqrt(ok)\n\n\n# In[28]:\n\n\nnp.max(ok)\n\n\n# In[29]:\n\n\nnp.min(ok)\n\n\n# In[30]:\n\n\nnp.sin(ok)\n\n\n# In[32]:\n\n\nnp.log(ok)\n\n\n# In[34]:\n\n\nlog_number=np.arange(0,3)\nprint(np.log(log_number))\n\n\n# In[96]:\n\n\nn=np.zeros(10)\nn\n\n\n# In[105]:\n\n\nn+5\n\n\n# In[106]:\n\n\nnp.ones(10)*5\n\n\n# In[112]:\n\n\nN= (np.arange(100).reshape(10,10)+1)/100\nN\n\n\n# In[113]:\n\n\nnp.arange(1,101).reshape(10,10)/100\n\n\n# In[100]:\n\n\nnp.linspace(0,1,20)\n\n\n# In[42]:\n\n\nnp.arange(10,51)\n\n\n# In[108]:\n\n\nnp.arange(9).reshape(3,3)\n\n\n# In[109]:\n\n\nnp.eye(3)\n\n\n# In[110]:\n\n\nnp.random.rand()\n\n\n# In[111]:\n\n\nnp.linspace(0,50,200)\n\n\n# In[46]:\n\n\nnp.random.randn(25)\n\n\n# In[107]:\n\n\nnp.arange(10,51,2)\n\n\n# In[114]:\n\n\nn = np.arange(25).reshape(5,5)+1\nn\n\n\n# In[117]:\n\n\nn = np.arange(1,26).reshape(5,5)\nn\n\n\n# In[118]:\n\n\nprint(n[2:,1:])\n\n\n# In[77]:\n\n\nnp.sum(n)\n\n\n# In[80]:\n\n\nprint(n[4:5])\n\n\n# In[78]:\n\n\nprint(n[3:5])\n\n\n# In[116]:\n\n\n\n\n\n# 
In[62]:\n\n\nnp.array([[12,13,14,15],[17,18,19,20],[22,23,24,25]])\n\n\n# In[85]:\n\n\nnp.array(n[0:3,1:2])\n\n\n# In[104]:\n\n\nnp.sqrt(52)\n\n\n# In[103]:\n\n\nnp.arange(11,16)*5\n\n" }, { "alpha_fraction": 0.6464088559150696, "alphanum_fraction": 0.6685082912445068, "avg_line_length": 18.105262756347656, "blob_id": "12c96df0c138acb8a7d8ec4720ac5214606810eb", "content_id": "bee06c0828e4d2483c2b3702d8a99aee8a7672ee", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 362, "license_type": "no_license", "max_line_length": 47, "num_lines": 19, "path": "/excel/ML6.py", "repo_name": "banana6742/webscraping", "src_encoding": "UTF-8", "text": "import pandas as pd\n\nprint(pd.read_csv('example.csv'))\n# to write\nprint('-'*60)\ndf=pd.read_csv('example.csv')\nprint(df)\n\nprint('-'*40)\n# df.to_csv('My_output')\ndf.to_csv('My_output.csv')\nprint(pd.read_csv('My_output.csv'))\n\n\nprint('-'*40)\ndf.to_csv('My_output.csv',index=False)\nprint(pd.read_csv('My_output.csv'))\n\npd.read_excel('Book2.xlsx',sheet_name='Sheet1')" }, { "alpha_fraction": 0.42407944798469543, "alphanum_fraction": 0.44414564967155457, "avg_line_length": 26.91907501220703, "blob_id": "dba9fb022a8ef5bbac5bf6812fee38a82308e214", "content_id": "54950b6d0a5fc33b9c0785e8b3c4c9f4d353d905", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4948, "license_type": "no_license", "max_line_length": 72, "num_lines": 173, "path": "/ML3.py", "repo_name": "banana6742/webscraping", "src_encoding": "UTF-8", "text": "import pandas as pd\nimport numpy as np\nlabels = ['a','b','c','d','e']\nmy_data = [10,20,30,40,50]\n\narr=np.array(my_data)\n\nd= {'a':10,'b':20,'c':30,'d':40,'e':50}\nprint('__________________________________________')\n\nprint(pd.Series(data=my_data))\nprint('__________________________________________')\n#i want to have index as a,b,c.....\n\nprint(pd.Series(data=my_data,index=labels))\nprint('__________________________________________')\n\nprint(pd.Series(my_data,labels))\n# 順番を変えてみる。データ→ラベルの順番\nprint(pd.Series(labels,my_data))\nprint('__________________________________________')\n# simple way to create the Series\nprint(pd.Series(arr))\n\nprint('__________________________________________')\nprint(pd.Series(arr,labels))\n\nprint('__________________________________________')\nprint(pd.Series(d))\n\n# for index values as values\nprint(pd.Series(data=labels))\nprint('__________________________________________')\n\n# lets create the series with country names\nser1 = pd.Series([1,2,3,4,5],['USA','GERMANY','USSR','JAPAN','TURKEY'])\nprint(ser1)\n\nser2 = pd.Series([1,2,4,6,7],['USA','GERMANY','ITALY','FRANCE','INDIA'])\nprint(ser2)\n\nprint('__________________________________________')\n\nser3 = ser1 +ser2\nprint(ser3)\n\nimport numpy as np\nimport pandas as pd \nfrom numpy.random import randn\nnp.random.seed(101)\ndf = pd.DataFrame(randn(5,4),['A','B','C','D','E'],['W','X','Y','Z'])\nprint(df)\nprint('__________________________________________')\n\nprint(df['W'])\nprint(df['Z'])\nprint('__________________________________________')\n\n#lets check the datatype\n\nprint(type(df['W']))\nprint('__________________________________________')\n#some more ways u can extract the data\n\nprint(df.W)\nprint(df.Z)\nprint('__________________________________________')\n# 2つのリストを作るときは[[]]二重括弧で閉じる\nprint(df[['X','Y']])\nprint('__________________________________________')\n\n# try to add new column in the dataframe \ndf['new']= 
df['W']+df['Y']\nprint(df['new'])\nprint(df)\n\n#dropping the dataframe columns\n# print(df.drop('new'))\n#axis=1は存在している列 \n# axisかcolumnsのどちらかを使用する\n# print(df.drop('new',axis=1))\n# if itis true = \n# print(df.drop('new',axis=1,inplace=True))\nprint(df.drop('new',axis=1,inplace=False))\nprint('__________________________________________')\n\n# lets delete the rows\n# axis=1 is vertical axis=0 is horizontal\nprint(df.drop('E',axis=0))\nprint('__________________________________________')\nprint(df.drop('D',axis=0))\n\nprint(df.drop(['D','E'],axis=0))\nprint('__________________________________________')\nprint(df.loc['A'])\nprint('__________________________________________')\nprint(df.iloc[2])\nprint('__________________________________________')\nprint(df.loc['B','Y'])\n\nprint(df.loc[['A','B'],['W','Y']])\n\nprint('__________________________________________')\nprint('condition Selection')\nprint(df)\nprint(df>0)\n\nprint('__________________________________________')\nprint(df[df>0])\nprint('__________________________________________')\n\n#particular values within the column\nprint(df[df['W']>0])\nprint('__________________________________________')\nprint(df[df['W']>1])\nprint('__________________________________________')\nprint(df[df['W']>0]['Y'])\nprint('__________________________________________')\nprint(df[df['W']>0][['Y','X']])\nprint('__________________________________________')\n#take two conditons with and(&) with paranthesis\nprint(df[(df['W']>0) & (df['Y']>0)])\nprint('__________________________________________')\nprint(df)\n# reset the default index values 1,2,3,4,5,............\nprint(df.reset_index())\nprint('__________________________________________')\nnewind='CA NY WY OR CO'.split()\nprint(newind)\nprint('__________________________________________')\ndf['States']=newind\nprint(df)\nprint('__________________________________________')\nprint(df.set_index('States'))\n\n#Multi -Index and Index Hierachy\n# index levels\noutside = ['G1','G1','G1','G2','G2','G2']\ninside=[1,2,3,1,2,3]\nhier_index=list(zip(outside,inside))\nhier_index=pd.MultiIndex.from_tuples(hier_index)\nprint(hier_index)\nprint('__________________________________________')\n\ndf=pd.DataFrame(np.random.randn(6,2),index=hier_index,columns=['A','B'])\nprint(df)\nprint('__________________________________________')\n#select the perticular\nprint(df.loc['G1'])\nprint('__________________________________________')\n\n#this is slicing\nprint(df.loc['G1'].loc[1])\nprint('__________________________________________')\n# Im adding the index name to the dataframes\nprint(df.index.names)\ndf.index.names=['Group','Num']\nprint(df)\nprint(df.index.names)\nprint(df.xs('G1'))\nprint('__________________________________________')\n# selecting by G1 and also by there index values\nprint(df.xs(['G1',1]))\nprint('__________________________________________')\nprint(df.xs(['G1',3]))\n\nprint(df.xs(1,level='Num'))\n\nf = np.array([1,2,3])\ng = np.array([4,5,6])\n\nprint('Horizontal Append:\\n',np.vstack((f,g)))\nprint('Horizontal Append:',np.hstack((f,g)))\n " }, { "alpha_fraction": 0.6704213619232178, "alphanum_fraction": 0.6775135397911072, "avg_line_length": 32.29166793823242, "blob_id": "ec408329aea5b37ed2cd42e0e3cb1a8e58bd0c3c", "content_id": "8648e0a63ac00d8e2fa8604e9d445d584dd0e8cc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2397, "license_type": "no_license", "max_line_length": 169, "num_lines": 72, "path": "/dssalary/ML7.py", "repo_name": "banana6742/webscraping", 
"src_encoding": "UTF-8", "text": "#Im using the kaggle dataset\n#Import pandas as pd**\nimport pandas as pd\n\n#**Read Salaries.csv as a dataframe called sal**.\nsal = pd.read_csv('Salaries.csv')\n\n# print(sal)\nprint(sal.head())\n#Check the head of the DataFrame/**\nprint(sal.info)\n\n# what is the average BasePay?\nprint(sal['BasePay'].mean())\n\n# what is the highest amount of overtimepay in the dataset?\nprint(sal['OvertimePay'].max())\n\n# what is the job title of JOSEPH DRISCOLL? \n# Im using the kaggle dataset\n\n\n# ** Import pandas as pd.**\nimport pandas as pd\n\n# ** Read Salaries.csv as a dataframe called sal.**\nsal=pd.read_csv('Salaries.csv')\nprint(sal)\nprint('------------------------------')\n\n# ** Check the head of the DataFrame. **\n\nprint(sal.head())\n# ** Use the .info() method to find out how many entries there are.**\nprint('------------------------------')\n\nprint(sal.info)\n\n# What is the average BasePay ?\nprint(sal['BasePay'].mean())\n\n# ** What is the highest amount of OvertimePay in the dataset ? **\nprint(sal['OvertimePay'].max())\n# ** What is the job title of JOSEPH DRISCOLL ? Note: Use all caps, otherwise you may get an answer that doesn't match up (there is also a lowercase Joseph Driscoll). **\n\nprint(sal[sal['EmployeeName']=='JOSEPH DRISCOLL']['JobTitle'])\n# ** How much does JOSEPH DRISCOLL make (including benefits)? **\nprint(sal[sal['EmployeeName']=='JOSEPH DRISCOLL']['TotalPayBenefits'])\n# ** What is the name of highest paid person (including benefits)?**\n# print(sal['TotalPayBenefits'].max())\nprint(sal[sal['TotalPayBenefits']==sal['TotalPayBenefits'].max()])\n# print(sal.loc[sal['TotalPayBenefits'].idxmax()])\n\n# ** What is the name of lowest paid person (including benefits)? Do you notice something strange about how much he or she is paid?**\n# print(sal['TotalPayBenefits'].min())\nprint(sal[sal['TotalPayBenefits']==sal['TotalPayBenefits'].min()])\n\n\n# ** What was the average (mean) BasePay of all employees per year? (2011-2014) ? **\n# print(sal[sal['Year']=='2011:2014']['BasePay'].mean())\nprint(sal.groupby('Year').mean()['BasePay'])\n# ** How many unique job titles are there? **\n# print(sal['JobTitle'].unique)\nprint(sal['JobTitle'].nunique())\n# ** What are the top 5 most common jobs? **\nprint(sal['JobTitle'].value_counts().head())\n\n# ** How many people have the word Chief in their job title? (This is pretty tricky) **\n\n\n\n# ** Bonus: Is there a correlation between length of the Job Title string and Salary? 
**\n" }, { "alpha_fraction": 0.5223042964935303, "alphanum_fraction": 0.5723864436149597, "avg_line_length": 23.431438446044922, "blob_id": "5c1dfb98d631165c50838518b94fa1aae8682f2e", "content_id": "54d5e4d941a06ea358f276c4d10f0354ce98aad2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7432, "license_type": "no_license", "max_line_length": 103, "num_lines": 299, "path": "/ML4.py", "repo_name": "banana6742/webscraping", "src_encoding": "UTF-8", "text": "import numpy as np\nimport pandas as pd\nfrom numpy.random import randn\n\nnp.random.seed(501)\n\nnew_df = pd.DataFrame(randn(6,5),['city','place','country','zip','name','floor'],['A','B','C','D','E'])\n\nprint(new_df)\nprint('-'*50)\nprint(new_df['A'])\nprint('-'*50)\nprint(new_df[['A','B']])\nprint('-'*50)\nprint(new_df.A)\nprint('-'*50)\nprint(new_df[['A','B','D']])\nprint('-'*50)\nnew_df['contact']=new_df['A']+new_df['B']+new_df['C']\nprint('-'*50)\nprint(new_df)\nprint('-'*50)\nprint(new_df.loc['city'])\nprint('-'*50)\nprint(new_df.loc[['city','place'],['A','B']])\nprint('-'*50)\nprint('-'*50)\nnew_df=pd.DataFrame(randn(3,2),['B','C','D'],['home','bungalow'])\nprint(new_df)\nprint('-'*50)\nprint(new_df[new_df['bungalow']<0])\nprint('-'*50)\nprint(new_df[new_df['bungalow']<0.6])\nprint('-'*50)\nstore_df=new_df[new_df['bungalow']>0]\nprint(store_df)\nprint('-'*50)\nprint(store_df['home'])\nprint('-'*50)\nprint(store_df['bungalow'])\nprint('-'*50)\n# print(new_df[new_df['home']>0][['B','A']])\nprint('-'*50)\n# print([new_df (new_df['home']>0) | (new_df['B']>1) ])\n\nprint(new_df[new_df['home']>0]['home'])\nprint(new_df[new_df['home']<5]['bungalow'])\n\n\nprint(new_df[new_df['home']>1]['home'])\n\nimport numpy as np\nimport pandas as pd\n\ndf=pd.DataFrame({'A':[1,2,np.nan],\n 'B':[np.nan,np.nan,np.nan],\n 'C':[1,2,3]})\n\nprint('-'*50)\nprint('-'*50)\nprint(df)\nprint('-'*50)\n#removes NaN values\n# by default axis=0\n# means works fine for only x-axis\n\nprint(df.dropna())\nprint('-'*50)\nprint(df.dropna(axis=1))\n# df.dropna()\nprint('-'*50)\n# thresh argument\n# And basically what that means is because row 1 had at least two non and a values 2.0 and the two it\n# find all Nan values and remove it\nprint(df.dropna(thresh=2))\nprint('-'*50)\nprint(df.dropna(thresh=3))\n\ndf=pd.DataFrame({'A':[1,2,np.nan],\n 'B':[5,np.nan,np.nan],\n 'C':[1,2,3]})\n\nprint('-'*50)\nprint('-'*50)\nprint(df)\nprint('-'*50)\n#removes NaN values\n# by default axis=0\n# means works fine for only x-axis\n\nprint(df.dropna())\nprint('-'*50)\nprint(df.dropna(axis=1))\n# df.dropna()\nprint('-'*50)\n# thresh argument\n# And basically what that means is because row 1 had at least two non and a values 2.0 and the two it\n# find all Nan values and remove it\nprint(df.dropna(thresh=2))\nprint('-'*50)\nprint(df.dropna(thresh=3))\n\n#A行のNAでない数を平均したり、足したりして埋める\nprint(df['A'].fillna(value=df['A'].mean()))\nprint(df['A'].fillna(value=df['A'].sum()))\n\nprint('_'*50)\n#grouping them\n\nimport numpy as np\nimport pandas as pd\n\ndata={'Company':['GOOG','GOOG','MSFT','MSFT','FB','FB'],\n 'Person':['Sam','Charile','Ama','Susume','Sue','Carl'],\n 
'Sales':[200,400,233,233,565,342]\n}\n\ndf=pd.DataFrame(data)\nprint(df)\nprint('-'*50)\nprint(df.groupby)\n\nprint(df.groupby('Company'))\n\nbyComp=df.groupby('Company')\nprint(byComp)\n\nprint(byComp.mean())\nprint(byComp.sum())\nprint(byComp.std())\n\nprint('-'*50)\n\nprint(byComp.sum().loc['FB'])\n\nprint('-'*50)\n\nprint(df.groupby(\"Company\").sum().loc['MSFT'])\n\nprint(df.groupby(\"Company\").count())\nprint('-'*50)\nprint(df.groupby(\"Company\").max())\nprint('-'*50)\nprint(df.groupby(\"Company\").min())\nprint('-'*50)\n\nprint(byComp.describe())\nprint('-'*50)\nprint(df.groupby(\"Company\").describe().transpose())\nprint('-'*50)\nprint(df.groupby(\"Company\").describe().transpose()[\"FB\"])\n\nimport pandas as pd\nimport numpy as np\n\ndf1 = pd.DataFrame({'A':['A0','A1','A2','A3'],\n 'B':['B0','B1','B2','B3'],\n 'C':['C0','C1','C2','C3'],\n 'D':['D0','D1','D2','D3']},\n index=[0,1,2,3])\n\n\ndf2 = pd.DataFrame({'A':['A4','A5','A6','A7'],\n 'B':['B4','B5','B6','B7'],\n 'C':['C4','C5','C6','C7'],\n 'D':['D4','D5','D6','D7']},\n index=[4,5,6,7])\n\ndf3 = pd.DataFrame({'A':['A8','A9','A10','A11'],\n 'B':['B8','B9','B10','B11'],\n 'C':['C8','C9','C10','C11'],\n 'D':['D8','D9','D10','D11']},\n index=[8,9,10,11])\n\n\nprint(df1)\nprint('_'*60)\nprint(df2)\nprint('_'*60)\nprint(df3)\nprint('_'*60)\n\n# Concatenation\n# Concatenation basically glues together DataFrames. Keep in mind that dimensions should\n# match along the axis you are concatenating on.You can use pd. concat and pass in a list of\n# DataFrames to concatenate together:\n\nprint(pd.concat([df1,df2,df3]))\n# print(pd.concat([df1,df2,df3],axis=0))\nprint('_'*60)\nprint(pd.concat([df1,df2,df3],axis=1))\n\nleft = pd.DataFrame({'key':['K0','K1','K2','K3'],\n 'A' : ['A0','A1','A2','A3'],\n 'B' : ['B0','B1','B2','B3']})\n\nright = pd.DataFrame({'key':['K0','K1','K2','K3'],\n 'C' : ['C0','C1','C2','C3'],\n 'D' : ['D0','D1','D2','D3']})\nprint('_'*60)\nprint(left)\nprint('_'*60)\nprint(right)\nprint('_'*60)\n# merging\n# the **merge** function allows you to merge DataFrames together using a similar logic as\n# merging SQL Tables together. For examle:\n\n# instead of concatinating i will get common elmements\nprint(pd.merge(left,right,how='inner',on='key'))\n# The INNER JOIN selects all rows from both particeipating tables as long as there is a\n# match between the columns. An SQL INNER JOIN is same as JOIN clause,combining rows from \n# two or more tables.\nprint('_'*60)\nleft = pd.DataFrame({'key1':['K0','K0','K1','K2'],\n 'key2':['K0','K1','K0','K1'],\n 'A' : ['A0','A1','A2','A3'],\n 'B' : ['B0','B1','B2','B3']})\n\nright = pd.DataFrame({'key1':['K0','K1','K1','K2'],\n 'key2':['K0','K0','K0','K0'],\n 'C' : ['C0','C1','C2','C3'],\n 'D' : ['D0','D1','D2','D3']})\nprint('_'*60)\nprint(pd.merge(left,right,on=['key1','key2']))\n# Outer joins. 
When performing an inner join, rows from either table that are unmatched in \n# the other table are not returned.In an outer join, unmatched rows in one or both tables \n# can be returned\nprint('_'*60)\nprint(pd.merge(left,right,how='outer',on=['key1','key2']))\n# inner join はベン図の'かつ'、\n# outer join はベン図の'または'、\n\n\n#operations\nprint('_'*60)\nimport pandas as pd\ndf = pd.DataFrame({'col1':[1,2,3,4],'col2':[444,555,666,444],'col3':['abc','def','ghi','xyz']})\nprint(df.head())\n\n#info on unique values\n\nprint(df['col2'].unique())\nprint(len((df['col2'].unique())))\n\nprint(df['col2'].nunique())\n\n# counts how many times repeated\nprint(df['col2'].value_counts())\n\nprint('-'*70)\n\n#Select from DataFrame using criteria from multiple columns\nnewdf = df[(df['col1']>2) & (df['col2']==444)]\nprint('newdf: ', newdf)\nprint('-'*70)\n\ndef times2(x):\n return x*2\nprint(df['col1'].apply(times2))\n\nprint(df['col3'].apply(len))\n\nprint(df['col1'].sum())\nprint(df['col2'].sum())\n\ndel df['col1']\nprint(df)\n\nprint('-'*50)\n\ndf.columns\nprint('df.columns:',df.columns)\nprint('df.index: ', df.index)\n\n#sorting of the data\n#inplace= False by default\n# 最初はcol2の数字の大きさ順\nprint(df.sort_values(by='col2'))\n# 最初はcol2のアルファベット順\nprint(df.sort_values(by='col3'))\n\n#boolean values\n\nprint(df.isnull())\nprint('-'*50)\n\n#createing the new dataframe\n\ndata = {'A':['foo','foo','foo','bar','bar','bar'],\n 'B':['one','one','two','two','one','one'],\n 'C':['x','y','x','y','x','y'],\n 'D':[1,3,2,5,4,1]}\n\ndf = pd.DataFrame(data)\n#repeating values present\n\nprint(df)\nprint('-'*50)\n\n\n\n" } ]
8
sizeof/pylibmemcached
https://github.com/sizeof/pylibmemcached
7a6e923d8fe2d82a2330f7864a21f8491957b8fe
9e7405b6360bb0428903e84f160650204bd405fc
544ba58cb22d0a8bdac5e24771b70dd528ab49f2
refs/heads/master
2020-05-30T03:53:20.341007
2009-05-25T21:25:33
2009-05-25T21:25:33
207,054
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.6913827657699585, "alphanum_fraction": 0.6973947882652283, "avg_line_length": 30.25, "blob_id": "f7719df0a60e2007a48b9c94386c3804a6b22212", "content_id": "a7513a96c843092cb238ab355f48f3157fe314c8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 499, "license_type": "no_license", "max_line_length": 94, "num_lines": 16, "path": "/setup.py", "repo_name": "sizeof/pylibmemcached", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\nfrom distutils.core import setup\nfrom distutils.extension import Extension\nfrom Cython.Distutils import build_ext\n\nsetup(\n name = \"pylibmemcached\",\n version = \"0.1.0\",\n description=\"Python wrapper for libmemcached, a C client library to the Memcached server\",\n maintainer=\"sizeof\",\n maintainer_email=\"[email protected]\",\n cmdclass = {'build_ext': build_ext},\n ext_modules=[Extension('pylibmemcached', ['pylibmemcached.pyx'],\n libraries=['memcached'],\n )]\n)" } ]
1
juanjuanShu/codes
https://github.com/juanjuanShu/codes
12cea1d0fe0b16d9fbaee3853aeb3712e7ef2b7a
905e70e79a21832ed751598d15ed71c420a310ce
55e0710904a0997f663f9d5152547a71ea09ae83
refs/heads/main
2023-03-24T01:28:43.176486
2021-03-17T09:06:47
2021-03-17T09:06:47
310,597,736
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6567460298538208, "alphanum_fraction": 0.6904761791229248, "avg_line_length": 28.647058486938477, "blob_id": "d83fa324ba39933294720ba6e84723b0598c351d", "content_id": "2f75f03458c95dfeec1e31da4608c9745ac7052f", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 524, "license_type": "permissive", "max_line_length": 104, "num_lines": 17, "path": "/data_mining/join_based/Common.cpp", "repo_name": "juanjuanShu/codes", "src_encoding": "GB18030", "text": "#include \"Common.h\"\n\nbool Common::isRReachable(LocationType& loc1, LocationType& loc2) {\n return pow(loc1.first - loc2.first, 2) + pow(loc1.second - loc2.second, 2) <= _distance * _distance;\n}\n\nbool Common::multi_rel(CellPositionType& p1, CellPositionType& p2) {\n //格子划分为1,那么最大\n return abs(p1.first - p2.first) <= 1 && abs(p1.second - p2.second) <= _cellResolution * 2;\n}\n\nCommon::Common(double distance, double cellResolution)\n :_distance(distance),\n _cellResolution(cellResolution)\n{\n\n}\n" }, { "alpha_fraction": 0.7483039498329163, "alphanum_fraction": 0.7496607899665833, "avg_line_length": 22.365079879760742, "blob_id": "3449a2c5f00931dcc30544b0a97b38d0fd08f661", "content_id": "9747f6dfff8a1df2c6fa17628c1220c1f28dcea3", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1474, "license_type": "permissive", "max_line_length": 83, "num_lines": 63, "path": "/data_mining/join_based/JoinBase.h", "repo_name": "juanjuanShu/codes", "src_encoding": "UTF-8", "text": "#pragma once\n#include \"stdafx.h\"\n#include \"Types.h\"\n#include \"Common.h\"\n#include \"MultiResolution.h\"\nclass Common;\n\nstruct Rule {\n\tColocationType antecedent;\n\tColocationType consequent;\n\tdouble conf;\n\n\tfriend bool operator < (struct Rule const&a, struct Rule const &b)\n\t{\n\t\tif (a.antecedent == b.antecedent) {\n\t\t\treturn a.consequent < b.consequent;\n\t\t}\n\t\telse {\n\t\t\treturn a.antecedent < b.antecedent;\n\t\t}\n\t}\n};\n\nclass JoinBase {\npublic:\n\tJoinBase(\n\t\tvector<InstanceType>& instances,\n\t\tdouble min_prev,\n\t\tdouble min_conf,\n\t\tdouble distance,\n\t\tbool fmul = true,\n\t\tdouble cellSize = 1\n\t);\n\n\tset<Rule> execute();\n\nprivate:\n\tdouble _min_prev;\n\tdouble _min_conf;\n\tdouble _distance;\n\tbool _fmul;\n\tdouble _cellSize;\n\tmap<FeatureType, map<InstanceIdType, LocationType>> _instances;\n\tmap<FeatureType, unsigned int> numOfInstances;\n\tmap<unsigned int,map<ColocationType, unsigned int>> _numOfColocations;\n\tmap<unsigned int, ColocationPackage> _prevalentColocation;\n\tvector<InstanceType> _true_instances;\n\tset<Rule> _rules;\n\n\tvector<ColocationType> _generateCandidateColocations_2();\n\n\tvector<ColocationType> _generateCandidateColocations_k(int k);\n\n\tColocationPackage _generateTableInstances(ColocationSetType& candidates, int k);\n\n\tvoid _selectPrevalentColocations(ColocationPackage& candidates, int k);\n\n\tbool _isSubsetPrevalent(ColocationType& candidates, int k);\n\n\tvoid _generateRules();\n\n\tunsigned int getRowInstancesOfColocationSub(const ColocationType& colocationSub);\n}; \n" }, { "alpha_fraction": 0.5460665225982666, "alphanum_fraction": 0.5697249174118042, "avg_line_length": 25.937824249267578, "blob_id": "b009c36e43765036f7dd321de4a9bd51ac465e48", "content_id": "0d1b19cc945e2ff38413cc1f4bdeafb5714305cb", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 5609, "license_type": 
"permissive", "max_line_length": 122, "num_lines": 193, "path": "/data_mining/apriori/apriori/apriori.cpp", "repo_name": "juanjuanShu/codes", "src_encoding": "UTF-8", "text": "// apriori.cpp : 此文件包含 \"main\" 函数。程序执行将在此处开始并结束。\n//\n\n// ConsoleApplication1.cpp : 此文件包含 \"main\" 函数。程序执行将在此处开始并结束。\n//\n\n#include <iostream>\n#include <set>\n#include <vector>\n#include <map>\n#include<algorithm>\n\n\nusing namespace std;\n\n//加载数据集\nvector < vector<string>> loadDataset()\n{\n vector < vector<string> > dataSet;\n\n vector <string> s1 = { \"I1\",\"I2\",\"I5\" };\n vector <string> s2 = { \"I2\",\"I4\" };\n vector <string> s3 = { \"I2\",\"I3\" };\n vector <string> s4 = { \"I1\",\"I2\",\"I4\" };\n vector <string> s5 = { \"I1\",\"I3\" };\n vector <string> s6 = { \"I2\",\"I3\" };\n vector <string> s7 = { \"I1\",\"I3\" };\n vector <string> s8 = { \"I1\",\"I3\",\"I2\",\"I5\" };\n vector <string> s9 = { \"I2\",\"I3\",\"I1\" };\n\n dataSet.push_back(s1);\n dataSet.push_back(s2);\n dataSet.push_back(s3);\n dataSet.push_back(s4);\n dataSet.push_back(s5);\n dataSet.push_back(s6);\n dataSet.push_back(s7);\n dataSet.push_back(s8);\n dataSet.push_back(s9);\n\n return dataSet;\n}\n\nmap< vector<string>, unsigned int > create_C1(vector < vector<string> >& dataSet) {\n map< vector<string>, unsigned int >C1;\n map< vector<string>, unsigned int >::iterator it_C1;\n //迭代器\n vector < vector<string> >::iterator it_data;\n vector<string>::iterator it_item;\n\n for (it_data = dataSet.begin(); it_data != dataSet.end(); it_data++) {\n for (it_item = (*it_data).begin(); it_item != (*it_data).end(); it_item++) {\n vector<string> tmp_item = {};\n tmp_item.push_back(*it_item);\n auto ret = C1.insert({ tmp_item,1 });\n //insert插入时候,由于map的key要求唯一,如果已经插入了,则返回值中的second的值为False\n //说明插入这个元素了\n if (!ret.second)\n ++ret.first->second;\n }\n }\n\n\n return C1;\n}\n\nmap< vector<string>, unsigned int > create_L1_by_C1(map< vector<string>, unsigned int >& C1, unsigned int min_sup_count) {\n map< vector<string>, unsigned int >::iterator it_C1;\n it_C1 = C1.begin();\n while (it_C1 != C1.end()) {\n if (it_C1->second < min_sup_count) {\n C1.erase(it_C1++);\n }\n else {\n it_C1++;\n }\n }\n\n for (auto& w : C1) {\n cout << w.first[0] << \" \" << w.second << endl;\n }\n return C1;\n}\n\nmap< vector<string>, vector<string> > create_map_L1(map< vector<string>, unsigned int > L1) {\n //获取key\n map< vector<string>, vector<string> > map_L1;\n map< vector<string>, unsigned int >::iterator it_L1;\n vector<string>::iterator it_item;\n vector<string> map_value;\n vector<string> map_key;\n\n for (it_L1 = L1.begin(); it_L1 != L1.end(); it_L1++) {\n map_value.push_back((it_L1->first)[0]);\n }\n\n map_key.push_back(\"0\");\n map_L1.insert(map<vector<string>, vector<string>>::value_type(map_key, map_value));\n\n return map_L1;\n}\n\nmap< vector<string>, vector<string> > create_C2(map< vector<string>, vector<string> >map_L) {\n map< vector<string>, vector<string> > map_Ck;\n map< vector<string>, vector<string> >::iterator it_map_L;\n vector<string>::iterator it_value, it_value2;\n vector<string> map_Ck_key;\n vector<string> map_Ck_value;\n\n\n for (it_map_L = map_L.begin(); it_map_L != map_L.end(); it_map_L++) {\n for (it_value = (it_map_L->second).begin(); it_value != (it_map_L->second).end() - 1; it_value++) {\n map_Ck_key = {}; map_Ck_value = {};\n map_Ck_key.push_back(*it_value);\n for (it_value2 = it_value + 1; it_value2 != (it_map_L->second).end(); it_value2++) {\n map_Ck_value.push_back(*it_value2);\n }\n map_Ck.insert(map<vector<string>, 
vector<string>>::value_type(map_Ck_key, map_Ck_value));\n }\n }\n\n return map_Ck;\n}\n\nvoid get_sup_count(map< vector<string>, vector<string> > map_Ck, vector < vector<string> >& dataSet, int k) {\n //根据map类型获得k项集\n //放入 无序map中\n //获得事务得到的k项集\n //遍历得到计数\n\n\n\n\n}\n//void create_Ck(map< vector<string>, vector<string> >map_L) {\n//\n//}\n\n\nvoid generate_Lk(vector < vector<string> >& dataSet, unsigned int min_sup_count) {\n //记录项集和统计计数\n map< vector<string>, unsigned int >C1;\n map< vector<string>, unsigned int >C2;\n map< vector<string>, unsigned int >L1;\n map< vector<string>, unsigned int >L2;\n\n map< vector<string>, vector<string> >map_L1;\n map< vector<string>, vector<string> >map_C2;\n map< vector<string>, vector<string> >map_Ck;\n vector< map< vector<string>, unsigned int > > L;\n\n //基于前缀存储的Ck map(k-1 ,k)\n map<vector<string>, vector<string>> prefix_Ck;\n\n //生成C1 map<项集 支持度计数>\n C1 = create_C1(dataSet);\n //L1\n L1 = create_L1_by_C1(C1, min_sup_count);\n //获得map存储\n map_L1 = create_map_L1(L1);\n\n L.push_back(L1);\n int k = 1;\n map_C2 = create_C2(map_L1);\n get_sup_count(map_C2, dataSet,k = 2);\n\n //create_Ck(map_C2);\n\n\n /*while (L[k - 1].size() > 0) {\n prefix_Ck = create_Ck(map_L1,k + 1);\n }*/\n\n\n\n\n\n\n // L1是map类型 key是 0 value是项集,生成C2 自连接,剪枝\n // 计算支持度计数:1)由事务生成 k 项集-Tk 2)将Lk 存储到 unorderedMap中,默认为0;\n // 3) 遍历 Tk去map中找,得到统计数\n\n}\n\nint main()\n{\n vector < vector<string> > dataSet;\n unsigned int min_sup_count = 2;\n\n dataSet = loadDataset();\n generate_Lk(dataSet, min_sup_count);\n\n}\n" }, { "alpha_fraction": 0.47071295976638794, "alphanum_fraction": 0.490060955286026, "avg_line_length": 25.370630264282227, "blob_id": "e85fb8ac484e147016a301c24c8accd1916bfcb2", "content_id": "743a54c3877a01bdd288f7deefc9080e1215f8b6", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3887, "license_type": "permissive", "max_line_length": 83, "num_lines": 143, "path": "/data_mining/apriori_V1.py", "repo_name": "juanjuanShu/codes", "src_encoding": "UTF-8", "text": "def load_data_set():\n data_set = [['l1', 'l2', 'l5'], ['l2', 'l4'], ['l2', 'l3'],\n ['l1', 'l2', 'l4'], ['l1', 'l3'], ['l2', 'l3'],\n ['l1', 'l3'], ['l1', 'l2', 'l3', 'l5'], ['l1', 'l2', 'l3']]\n return data_set\n\n\ndef create_C1(data_set):\n C1 = set()\n\n for t in data_set:\n for item in t:\n item_set = frozenset([item])\n C1.add(item_set)\n\n return C1\n\n\ndef generate_Lk_by_Ck(data_set, Ck, min_sup_count, support_data):\n Lk = set()\n item_count = {}\n\n for item in Ck:\n for t in data_set:\n if item.issubset(t):\n if item not in item_count:\n item_count[item] = 1\n else:\n item_count[item] += 1\n\n for key, value in item_count.items():\n if value >= min_sup_count:\n Lk.add(key)\n support_data[key] = value\n\n return Lk\n\n\ndef is_apriori(Ck_item, L):\n # k>=3 考虑候选集 k-项集 X={i1,i2,...,ik}。算法必须确定她的所有(k-1)集也是频繁的,即 X - ij\n for item in Ck_item:\n sub_Ck = Ck_item - frozenset([item])\n if sub_Ck not in L:\n return False\n return True\n\n\ndef create_Ck(L, k):\n Ck = set()\n L = list(L)\n lenL = len(L)\n\n for i in range(0, lenL):\n for j in range(i, lenL):\n l1 = list(L[i])\n l2 = list(L[j])\n l1.sort()\n l2.sort()\n\n if k == 2 and L[i] != L[j]:\n\n Ck_item = L[i] | L[j]\n Ck.add(Ck_item)\n elif k != 2 and l1[0:k - 2] == l2[0:k - 2]:\n Ck_item = L[i] | L[j]\n if is_apriori(Ck_item, L):\n Ck.add(Ck_item)\n\n return Ck\n\n\ndef generate_L(data_set, min_sup_count):\n # 存储频繁项集合和对应的支持度计数\n support_data = {}\n\n C1 = create_C1(data_set)\n L1 = 
generate_Lk_by_Ck(data_set, C1, min_sup_count, support_data)\n\n L = [0, L1]\n k = 2\n while (len(L[k - 1]) > 0):\n Ck = create_Ck(L[k - 1], k)\n if (len(Ck) > 0):\n Lk = generate_Lk_by_Ck(data_set, Ck, min_sup_count, support_data)\n L.append(Lk)\n k += 1\n else:\n k += 1\n break\n\n return L[k - 2], support_data\n\n\ndef generate_associate_rules(Lk, support_data, min_conf):\n sub_L = support_data.keys() - Lk\n\n len_sub_L = len(sub_L)\n len_L = len(Lk)\n\n sub_L = list(sub_L)\n Lk = list(Lk)\n\n associate_rules_list = []\n\n for i in range(0, len_L):\n for j in range(0, len_sub_L):\n if sub_L[j].issubset(Lk[i]):\n # 将 Y分成 x 和 Y - x;P(Y)/P(x)=conf =》关联规则: X=>Y-X conf\n conf = support_data[Lk[i]] / support_data[sub_L[j]]\n if conf >= min_conf:\n sub_item = Lk[i] - sub_L[j]\n associate_rule = [sub_L[j], sub_item, conf]\n associate_rules_list.append(associate_rule)\n\n return associate_rules_list\n\n\ndef format_conversion(items):\n res_str = ''\n items = list(items)\n items_len = len(items)\n\n for i in range(0, items_len):\n res_str = res_str + items[i] + '^'\n res_str = res_str.rstrip('^')\n\n return res_str\n\n\ndef visualization(associate_rules_list):\n len_L = len(associate_rules_list)\n for i in range(0, len_L):\n first_str = format_conversion(associate_rules_list[i][0])\n second_str = format_conversion(associate_rules_list[i][1])\n conf = str(associate_rules_list[i][2] * 100) + \"%\"\n print(first_str + ' => ' + second_str + ', ' + 'confidence = ' + conf)\n\n\nif __name__ == \"__main__\":\n data_set = load_data_set()\n Lk, support_data = generate_L(data_set, min_sup_count=2)\n associate_rules_list = generate_associate_rules(Lk, support_data, min_conf=0.5)\n visualization(associate_rules_list)\n\n\n" }, { "alpha_fraction": 0.6906857490539551, "alphanum_fraction": 0.6992939114570618, "avg_line_length": 29.41176414489746, "blob_id": "de76b2ff53dec16a95d49e647230f86c316563e7", "content_id": "5482912f87cbce0cac26602e0bc99d226f9f4181", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 10741, "license_type": "permissive", "max_line_length": 129, "num_lines": 340, "path": "/data_mining/join_based/JoinBase.cpp", "repo_name": "juanjuanShu/codes", "src_encoding": "UTF-8", "text": "#include \"JoinBase.h\"\n#include \"MultiResolution.h\"\n#include \"Common.h\"\n#include <set>\n\nJoinBase::JoinBase(vector<InstanceType>& instances,double min_prev, double min_conf, double distance, bool fmul, double cellSize)\n\t:_min_prev(min_prev),\n\t_min_conf(min_conf),\n\t_distance(distance),\n\t_fmul(fmul),\n\t_true_instances(instances),\n\t_cellSize(cellSize){\n\tfor (auto it = instances.begin(); it != instances.end(); it++) {\n\t\tauto instanceId = get<InstanceIdType>(*it);\n\t\tauto feature = get<FeatureType>(*it);\n\t\tauto location = get<LocationType>(*it);\n\n\t\t_instances[feature][instanceId] = location;\n\n\t\t_prevalentColocation[1][{feature}].push_back({ instanceId });\n\n\t\t//_colocationNum (特征,数量)\n\t\tauto ret = numOfInstances.insert({ feature,1 });\n\t\tif (!ret.second) { ++ret.first->second; }\n\n\t\t_numOfColocations[1][{feature}] ++;\n\t}\n}\n\nvector<ColocationType> JoinBase::_generateCandidateColocations_2() {\n\tvector<FeatureType> colocations;\n\tvector<ColocationType> candidateColocations;\n\n\t//获取到实例类型,排序\n\tfor (auto it_data = numOfInstances.begin(); it_data != numOfInstances.end(); it_data++) {\n\t\tcolocations.push_back((*it_data).first);\n\t}\n\tsort(colocations.begin(), colocations.end());\n\t//A B C\n\tfor (unsigned int 
i = 0; i < colocations.size() - 1; i++) {\n\t\tfor (unsigned int j = i + 1; j < colocations.size(); j++) {\n\t\t\tcandidateColocations.push_back({ colocations [i],colocations [j]});\n\t\t}\n\t}\n\n\treturn candidateColocations;\n}\n\nbool JoinBase::_isSubsetPrevalent(ColocationType& candidates, int k) {\n\tif (k <= 2) return true;\n\n\tfor (unsigned int i = 0; i < candidates.size();i++) {\n\t\tColocationType candidatesCopy(candidates);\n\t\tcandidatesCopy.erase(candidatesCopy.begin() + i);\n\t\tif (!_prevalentColocation[k - 1].count(candidatesCopy)) {\n\t\t\treturn false;\n\t\t}\n\t}\n\t\n\treturn true;\n}\n\nvector<ColocationType> JoinBase::_generateCandidateColocations_k(int k){\n\tif (k == 2) return _generateCandidateColocations_2();\n\n\tvector<ColocationType> candidateColocations;\n\tColocationPackage& colocationPackage = _prevalentColocation[k - 1];\n\tColocationSetType C;\n\tvector<FeatureType> colocationSet;\n\tmap < ColocationType, ColocationType> trie_colocationSet = {};\n\n\t//get\n\tfor (auto it = colocationPackage.begin(); it != colocationPackage.end(); it++) {\n\t\tC.push_back((*it).first);\n\t}\n\tsort(C.begin(), C.end());\n\t\n\t//存储\n\tfor (unsigned int i = 0; i < C.size(); ++i) {\n\t\tcolocationSet = C[i];\n\t\tFeatureType lastElement = colocationSet.back();\n\t\tcolocationSet.pop_back();\n\t\tif (trie_colocationSet.find(colocationSet) == trie_colocationSet.end()) {\n\t\t\ttrie_colocationSet.insert({ colocationSet,{lastElement} });\n\t\t}\n\t\telse {\n\t\t\ttrie_colocationSet[colocationSet].push_back(lastElement);\n\t\t}\n\t}\n\n\t//连接\n\tfor (auto& item : trie_colocationSet) {\n\t\tColocationType candidate = item.first;\n\t\t//如果后面的k只有一个,则无法连接\n\t\tif (item.second.size() >= 2) {\n\t\t\tfor (auto it_value = (item.second).begin(); it_value != (item.second).end() - 1; it_value++) {\n\t\t\t\tfor (auto it_value1 = it_value + 1; it_value1 != (item.second).end(); it_value1++) {\n\t\t\t\t\tColocationType tmpCandidate(candidate);\n\t\t\t\t\ttmpCandidate.push_back(*it_value);\n\t\t\t\t\ttmpCandidate.push_back(*it_value1);\n\t\t\t\t\tif (_isSubsetPrevalent(tmpCandidate,k)) {\n\t\t\t\t\t\tcandidateColocations.push_back(tmpCandidate);\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\treturn candidateColocations;\n}\n\nColocationPackage JoinBase::_generateTableInstances(ColocationSetType &candidates, int k) {\n\tColocationPackage candidatePackage;\n\n\tfor (auto candidate : candidates) {\n\t\t//A B\n\t\t//A B C一定是由 A B(前 k - 1项)和 A C(k -2 加上最后一个)组成得到的\n\t\tColocationType candidate1(candidate.begin(),candidate.end() -1);\n\t\tColocationType candidate2(candidate.begin(),candidate.end() -2); \n\t\tcandidate2.push_back(candidate.back());\n\n\t\tTableInstanceType tableInstance1= _prevalentColocation[k - 1][candidate1];\n\t\tTableInstanceType tableInstance2= _prevalentColocation[k - 1][candidate2];\n\n\t\tfor (auto it1 = tableInstance1.begin(); it1 != tableInstance1.end(); it1++) {\n\t\t\tRowInstanceType& rowInstance1 = *it1;\n\t\t\tfor (auto it2 = tableInstance2.begin(); it2 != tableInstance2.end(); it2++) {\n\t\t\t\tRowInstanceType& rowInstance2 = *it2;\n\n\t\t\t\tbool canMerge = true;\n\t\t\t\tfor (unsigned int idx = 0; idx < k - 2; idx++) {\n\t\t\t\t\tif (rowInstance1[idx] != rowInstance2[idx]) {\n\t\t\t\t\t\tcanMerge = false;\n\t\t\t\t\t\tbreak;\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\tCommon* a = new Common(_distance, _cellSize);\n\t\t\t\tif (canMerge) {\n\t\t\t\t\tLocationType location1 = _instances[candidate1.back()][rowInstance1.back()];\n\t\t\t\t\tLocationType location2 = 
_instances[candidate2.back()][rowInstance2.back()];\n\t\t\t\t\tif (a->isRReachable(location1, location2)) {\n\t\t\t\t\t\tRowInstanceType rowNewInstance(rowInstance1);\n\t\t\t\t\t\trowNewInstance.push_back(rowInstance2.back());\n\t\t\t\t\t\tcandidatePackage[candidate].push_back(move(rowNewInstance));\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\treturn candidatePackage;\n}\n\nvoid JoinBase::_selectPrevalentColocations(ColocationPackage & candidatePackages,int k) {\n\tif (empty(candidatePackages)) return;\n\tfor (auto candidatePackage : candidatePackages) {\n\t\tColocationType colocations = candidatePackage.first;\n\t\tTableInstanceType tableInstances = candidatePackage.second;\n\n\t\t//初始化位图\n\t\tmap<FeatureType, vector<bool>> bitMap;\n\t\tfor (unsigned int i = 0; i < colocations.size(); i++) {\n\t\t\tFeatureType feature = colocations[i];\n\t\t\t//numOfInstances[feature]是feature的实例数\n\t\t\tbitMap[feature] = vector<bool>(numOfInstances[feature],false);\n\t\t}\n\t\t//A B:1 1,2 4 \n\t\tfor (auto rowInstance : tableInstances) {\n\t\t\tfor (unsigned int i = 0; i < colocations.size(); i++) {\n\t\t\t\tFeatureType feature = colocations[i];\n\t\t\t\t//rowInstance[i]是1,2...,从1开始,但是位图下标从0开始\n\t\t\t\tbitMap[feature][rowInstance[i] - 1] = true;\n\t\t\t}\n\t\t}\n\n\t\tbool isPrevalent = true;\n\t\tfor (auto it_bit = bitMap.begin(); it_bit != bitMap.end(); it_bit++) {\n\t\t\tFeatureType feature = (*it_bit).first;\n\t\t\tvector<bool> flag = (*it_bit).second;\n\n\t\t\tint count = 0;\n\t\t\tfor (unsigned int i = 0; i < flag.size(); i++) {\n\t\t\t\tif (flag[i]) count++;\n\t\t\t}\n\t\t\tif (count * 1.0 / flag.size() < _min_prev) {\n\t\t\t\tisPrevalent = false;\n\t\t\t\tbreak;\n\t\t\t}\n\t\t\t\n\t\t}\n\t\t\n\t\t//如果isPrevalent为true,才会保留\n\t\tif (isPrevalent) {\n\t\t\t_prevalentColocation[k][colocations] = tableInstances;\n\t\t}\n\t}\n\n\t//记录频繁模式和出现的次数\n\tauto prevalentPackages = _prevalentColocation[k];\n\tfor (auto prevalentPackage : prevalentPackages) {\n\t\tColocationType colocations = prevalentPackage.first;\n\t\tTableInstanceType tableInstances = prevalentPackage.second;\n\n\t\tset<RowInstanceType> rowInstanceSet;\n\t\tfor (auto rowInstance:tableInstances) {\n\t\t\tif (rowInstanceSet.find(rowInstance) == rowInstanceSet.end()) {\n\t\t\t\trowInstanceSet.insert(rowInstance);\n\t\t\t\t_numOfColocations[k][colocations] ++;\n\t\t\t}\n\t\t}\n\t}\n}\n\n//判断一个集合是另一个集合的子集\nbool issubset(const ColocationType &colocation_sub, const ColocationType &colocation) {\n\tset<FeatureType> sub_set(colocation_sub.begin(),colocation_sub.end());\n\tset<FeatureType> colocatioin_set(colocation.begin(), colocation.end());\n\tfor (auto& sub_item : sub_set) {\n\t\tif (colocatioin_set.find(sub_item) == colocatioin_set.end()) {\n\t\t\treturn false;\n\t\t}\n\t}\n\n\treturn true;\n}\n\nvector<unsigned int> getFeatureIdx\n\n\n(const ColocationType &colocation,const ColocationType & antecedent) {\n\tvector<unsigned int> featureIdx;\n\n\tint pos = 0;\n\t//A B ;A B C \n\tfor (unsigned int i = 0; i < colocation.size(); i++) {\n\t\tif (colocation[i] == antecedent[pos]) {\n\t\t\tfeatureIdx.push_back(i);\n\t\t\tpos++;\n\t\t}\n\t\tif (pos == antecedent.size()) break;\n\t}\n\n\treturn featureIdx;\n}\n\nunsigned int getProjectNumOfColocation(TableInstanceType tableInstance, vector<unsigned int> featureIdx) {\n\tset<RowInstanceType> rowInstanceProjectSet;\n\n\tfor (auto rowInstance : tableInstance) {\n\t\tRowInstanceType rowInstanceIds;\n\t\t//得到投影的模式的行实例个数\n\t\tfor (unsigned int i = 0; i < featureIdx.size(); i++) 
{\n\t\t\trowInstanceIds.push_back(rowInstance[featureIdx[i]]);\n\t\t}\n\t\trowInstanceProjectSet.insert(rowInstanceIds);\n\t}\n\n\treturn rowInstanceProjectSet.size();\n}\n\nunsigned int JoinBase::getRowInstancesOfColocationSub(const ColocationType& antecedent) {\n\treturn _prevalentColocation[antecedent.size()][antecedent].size();\n}\n\nvoid JoinBase::_generateRules() {\n\t//获取colocationSubSet\n\tColocationSetType colocationSubSet;\n\tColocationPackage colocationOnePackages = _prevalentColocation[1];\n\tfor (auto colocationPackage : colocationOnePackages) {\n\t\tColocationType colocation = colocationPackage.first;\n\t\tcolocationSubSet.push_back(colocation);\n\t}\n\n\t//a =>bc abc(投影bc)/bc\n\tint length = _prevalentColocation.size();\n\tfor (unsigned int k = 2; k <= length;k++) {\n\t\tColocationPackage colocationPackages = _prevalentColocation[k];\n\t\n\t\t//abc\n\t\tfor (auto colocationPackage : colocationPackages) {\n\t\t\tColocationType colocation = colocationPackage.first;\n\t\t\tTableInstanceType tableInstance = colocationPackage.second;\n\n\t\t\tfor (auto colocationSub : colocationSubSet) {\n\t\t\t\tif (!issubset(colocationSub, colocation)) {\n\t\t\t\t\tcontinue;\n\t\t\t\t}\n\n\t\t\t\tColocationType antecedent;\n\t\t\t\t//abc - bc = a, a =>bc \n\t\t\t\t//条件概率是 abc中a去重 / bc\n\t\t\t\tset_difference(colocation.begin(), colocation.end(), \n\t\t\t\t\tcolocationSub.begin(), colocationSub.end(), \n\t\t\t\t\tback_inserter(antecedent));\n\n\t\t\t\t//abc=>colocation bc=>colocationSub\n\t\t\t\t//找出colocationSub在colocation中的feature的下标(按照字典序排序)\n\t\t\t\t//例如:colocation:A B C ;colocationSub:A C,则是 0,2\n\t\t\t\tvector<unsigned int> featureIdx = getFeatureIdx(colocation, antecedent);\n\t\t\t\t\n\t\t\t\t//获得分子:abc在ab投影下的行实例数\n\t\t\t\tunsigned int projectNumOfColocation = getProjectNumOfColocation(tableInstance, featureIdx);\n\t\t\t\t\n\t\t\t\t//分母\n\t\t\t\tunsigned int antecedentTableInstanceSize = getRowInstancesOfColocationSub(antecedent);\n\t\t\t\t\n\t\t\t\tdouble conf = projectNumOfColocation * 1.0 / antecedentTableInstanceSize;\n\t\t\t\tif (conf >= _min_conf) {\n\t\t\t\t\t\n\t\t\t\t\t_rules.insert(move(Rule{ antecedent, colocationSub, conf }));\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t//这一轮得到的低级放入下一轮 作为 colocationSubSet\n\t\t\tcolocationSubSet.push_back(colocation);\n\t\t}\n\t}\n}\n\n\nset<Rule> JoinBase::execute() {\n\tint k = 2;\n\n\twhile (_prevalentColocation.count(k - 1) && !_prevalentColocation[k - 1].empty()) {\n\t\tvector<ColocationType> candidates = _generateCandidateColocations_k(k);\n\t\tif (_fmul) {\n\t\t\t//用static,只会初始化一次\n\t\t\tstatic MultiResolution multiResolution(_true_instances, _min_prev, _cellSize, _distance, numOfInstances);\n\t\t\tmultiResolution.multiResolutionPruning(candidates, k);\n\t\t}\n\t\tColocationPackage candidatePackages = _generateTableInstances(candidates, k);\n\t\t_selectPrevalentColocations(candidatePackages, k);\n\t\tk++;\n\t}\n\n\t_generateRules();\n\n\treturn _rules;\n}" }, { "alpha_fraction": 0.5555555820465088, "alphanum_fraction": 0.5612233877182007, "avg_line_length": 28.04347801208496, "blob_id": "6866079df78d65f6b837f78784e406ffd1b75a20", "content_id": "9d81ae59f523be80643d57c28d6793364a89394a", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 9351, "license_type": "permissive", "max_line_length": 138, "num_lines": 322, "path": "/Apriori/Apriori.h", "repo_name": "juanjuanShu/codes", "src_encoding": "UTF-8", "text": "#ifndef APRIOR_H_INCLUDED\n#define APRIOR_H_INCLUDED\n\n#include <iostream>\n#include 
<string>\n#include <vector>\n#include <memory>\n#include <unordered_map>\n#include <algorithm>\n#include <cmath>\n#include <map>\n#include \"HashTree.h\"\n\ntemplate<typename T>\nusing ItemSet = std::vector<T>;\n\ntemplate<typename T>\nusing ItemSetSet = std::vector<ItemSet<T>>;\n\ntemplate<typename T>\nclass Apriori\n{\npublic:\n    Apriori(std::shared_ptr<ItemSetSet<T>> transSet, double supp, double conf);\n\n    //Apriori(std::string filename, double supp, double conf, std::string fileType = \"\");\n\n    struct Rule {\n        ItemSet<T> antecedent;\n        ItemSet<T> consequent;\n        double conf;\n    };\n\nprivate:\n    std::shared_ptr<ItemSetSet<T>> _transSet;\n\n    double _transSize = 0;\n    double _supp = 0; // support rate\n    double _conf = 0; // confidence rate\n\n    std::map<ItemSet<T>, int> _suppCount;\n    std::map<int, std::shared_ptr<ItemSetSet<T>>> _freqSet;\n\n    void _setSupp(double);\n    void _setConf(double);\n\n    // Generate frequent probable set containing k + 1 itemsets. Use F_k x F_k.\n    std::shared_ptr<ItemSetSet<T>> _aprioriGen(std::shared_ptr<ItemSetSet<T>>, int k);\n\n    // Build probable set hash tree.\n    std::shared_ptr<HashTree<T>> _buildHashTree(std::shared_ptr<ItemSetSet<T>>);\n\n    void _calculateSupportCount(\n        ItemSet<T> &trans, \n        std::shared_ptr<HashNode<T>> p,\n        int remainder,\n        int k, \n        int lastPos\n    );\n\n    std::shared_ptr<ItemSetSet<T>> _generate1FrequentSet();\n    \n    // Calculate the frequent set with k elements. The probSet will be replaced by the frequent set. \n    // k should be greater than 0.\n    void _generateKFrequentSet(\n        std::shared_ptr<ItemSetSet<T>> probSet, \n        int k);\n\n    std::shared_ptr<ItemSetSet<T>> _generateFrequentSet();\n\n    void _generateRuleByItemset(\n        ItemSet<T> &itemset, // the full itemset\n        ItemSet<T> &consequent, // current consequent\n        std::shared_ptr<ItemSet<Rule>> ans, // Collect rules.\n        int pos // current position in the full itemset.\n    );\n\n    std::shared_ptr<ItemSet<Rule>> _generateRule();\n\npublic:\n    std::shared_ptr<ItemSet<Rule>> execute();\n};\n\n\n\ntemplate<typename T>\nvoid Apriori<T>::_setSupp(double supp)\n{\n    if (supp > 1.0 || supp < 0)\n    {\n        std::cerr << \"Support rate must be a positive number not greater than 1.\" << std::endl;\n    }\n    else\n    {\n        _supp = supp;\n    }\n}\n\ntemplate<typename T>\nvoid Apriori<T>::_setConf(double conf)\n{\n    if (conf > 1.0 || conf < 0)\n    {\n        std::cerr << \"Confidence rate must be a positive number not greater than 1.\" << std::endl;\n    }\n    else\n    {\n        _conf = conf;\n    }\n}\n\ntemplate <typename T>\nApriori<T>::Apriori(std::shared_ptr<ItemSetSet<T>> transSet, double supp, double conf) : _transSize(transSet->size()), _transSet(transSet)\n{\n    _setSupp(supp);\n    _setConf(conf);\n}\n\ntemplate <typename T>\nstd::shared_ptr<ItemSetSet<T>> Apriori<T>::_aprioriGen(std::shared_ptr<ItemSetSet<T>> F, int k)\n{\n    std::shared_ptr<ItemSetSet<T>> probSet(new ItemSetSet<T>);\n    // Choose the first F_k\n    for (int i = 0; i < (int)(*F).size(); ++i)\n    {\n        if ((int)(*F)[i].size() != k)\n            return nullptr;\n\n        // Choose the second F_k\n        for (int j = i + 1; j < (int)F->size(); ++j)\n        {\n            if ((int)(*F)[j].size() != k) return nullptr;\n\n            auto &itemset1 = (*F)[i], &itemset2 = (*F)[j];\n            bool canMerge = true;\n            for (int p = 0; p < k - 1; ++p)\n            {\n                if (itemset1[p] != itemset2[p])\n                {\n                    canMerge = false;\n                    break;\n                }\n            }\n\n            if (canMerge)\n            {\n                // First, insert the same value.\n                probSet->push_back(ItemSet<T>(itemset1.begin(), itemset1.begin() + k - 1));\n                // Then, compare the last values of the two itemsets.\n                // If the last of itemset1 is less than the itemset2's, push back 
it first.\n                if (itemset1.back() < itemset2.back())\n                {\n                    (*probSet).back().push_back(itemset1.back());\n                    (*probSet).back().push_back(itemset2.back());\n                }\n                else\n                {\n                    (*probSet).back().push_back(itemset2.back());\n                    (*probSet).back().push_back(itemset1.back());\n                }\n            }\n        }\n    }\n\n    return probSet;\n}\n\ntemplate<typename T>\nstd::shared_ptr<HashTree<T>> Apriori<T>::_buildHashTree(std::shared_ptr<ItemSetSet<T>> probSet)\n{\n    std::shared_ptr<HashTree<T>> hashTree = std::make_shared<HashTree<T>>();\n    for(auto& probItem : (*probSet))\n    {\n        hashTree->insert(probItem);\n    }\n    return hashTree;\n}\n\ntemplate<typename T>\nvoid Apriori<T>::_calculateSupportCount(\n    ItemSet<T> &trans, // Transaction set\n    std::shared_ptr<HashNode<T>> p, // Pointer to the hash tree node.\n    int remainder, // The number of vacant positions in the combination.\n    int k, // The length of the combination to generate.\n    int lastPos // The last position of chosen item in itemset.\n)\n{\n    if((int)trans.size() - lastPos - 1 < remainder) return; // Impossible.\n\n    static ItemSet<T> tmp; // It's static for global-like use.\n    // Temporary itemset is static, so it initializes just once.\n    if(remainder == k) {\n        tmp.resize(k);\n    }\n\n    if(remainder == 0) \n    {\n        ++_suppCount[tmp];\n        return;\n    }\n\n    for(unsigned int i = lastPos + 1; i < trans.size(); ++i)\n    {\n        if(!p->search(trans[i])) continue;\n\n        tmp[k - remainder] = trans[i];\n        _calculateSupportCount(trans, p->next(trans[i]), remainder - 1, k, i);\n    }\n}\n\ntemplate<typename T>\nvoid Apriori<T>::_generateKFrequentSet(\n    std::shared_ptr<ItemSetSet<T>> probSet, \n    int k)\n{\n    auto hashTree = _buildHashTree(probSet);\n    //std::shared_ptr<std::map<ItemSet<T>, int>> suppCnt = std::make_shared<std::map<ItemSet<T>, int>>();\n\n    // Every transaction would search in the probable hash tree.\n    for(auto &trans : *_transSet)\n    {\n        _calculateSupportCount(trans, hashTree, k, k, -1);\n    }\n\n    auto it = probSet->begin();\n    while(it != probSet->end())\n    {\n        if(_suppCount[*it] * 1.0 / _transSize < _supp)\n        {\n            it = probSet->erase(it);\n        }\n        else \n        {\n            it++;\n        }\n    }\n}\n\ntemplate<typename T>\nstd::shared_ptr<ItemSetSet<T>> Apriori<T>::_generate1FrequentSet()\n{\n    //std::map<T, int> suppCnt;\n    std::shared_ptr<ItemSetSet<T>> freqSet(new ItemSetSet<T>);\n    for(auto &trans : *_transSet)\n    {\n        for(auto &item : trans)\n        {\n            ItemSet<T> tmp{item};\n            if((_suppCount[tmp] == 0 && _supp == 0) || ++_suppCount[tmp] == ceil(_supp * _transSize)) {\n                freqSet->push_back(tmp);\n            }\n        }\n    }\n\n    return _freqSet[1] = freqSet;\n}\n\ntemplate<typename T>\nstd::shared_ptr<ItemSetSet<T>> Apriori<T>::_generateFrequentSet()\n{\n    std::shared_ptr<ItemSetSet<T>> F = _generate1FrequentSet();\n    for(int i = 2; ; ++i) {\n        std::shared_ptr<ItemSetSet<T>> probSet = _aprioriGen(F, i - 1); // generate probable set.\n        _generateKFrequentSet(probSet, i); // generateKFrequentSet function has no returns. 
The probSet is the K frequent set.\n        if(probSet->empty()) break;\n        F = _freqSet[i] = probSet;\n    }\n\n    return F;\n}\n\ntemplate<typename T>\nvoid Apriori<T>::_generateRuleByItemset(\n    ItemSet<T> &itemset,\n    ItemSet<T> &consequent,\n    std::shared_ptr<ItemSet<Rule>> ans,\n    int pos)\n{\n    // calculate subtraction which is antecedent\n    ItemSet<T> antecedent;\n    std::set_difference(itemset.begin(), itemset.end(), consequent.begin(), consequent.end(), std::back_inserter(antecedent));\n    double conf = _suppCount[itemset] * 1.0 / _suppCount[antecedent];\n    if(consequent.size() && conf < _conf) return;\n\n    if(pos == itemset.size())\n    {\n        if(antecedent.empty() || consequent.empty()) return;\n        ans->push_back(Rule{antecedent, consequent, conf});\n        return;\n    }\n\n    _generateRuleByItemset(itemset, consequent, ans, pos + 1); // Leave itemset[i] in the antecedent.\n    consequent.push_back(itemset[pos]);\n    _generateRuleByItemset(itemset, consequent, ans, pos + 1); // Put itemset[i] into the consequent.\n    consequent.pop_back(); // backtrack\n}\n\ntemplate<typename T>\nstd::shared_ptr<ItemSet<typename Apriori<T>::Rule>> Apriori<T>::_generateRule()\n{\n    std::shared_ptr<ItemSet<Rule>> rules = std::make_shared<ItemSet<Rule>>();\n    for(int i = 2; _freqSet.count(i); ++i)\n    {\n        std::shared_ptr<ItemSetSet<T>> freqSet = _freqSet[i];\n        for(auto &F : *freqSet)\n        {\n            ItemSet<T> tmp;\n            _generateRuleByItemset(F, tmp, rules, 0);\n        }\n    }\n    return rules;\n}\n\ntemplate<typename T>\nstd::shared_ptr<ItemSet<typename Apriori<T>::Rule>> Apriori<T>::execute()\n{\n    _generateFrequentSet();\n\n    return _generateRule();\n}\n\n#endif" }, { "alpha_fraction": 0.3983606696128845, "alphanum_fraction": 0.4540983736515045, "avg_line_length": 14.523809432983398, "blob_id": "d04bce51fb3dc37cb6dbb888afb9acdcecbb5dad", "content_id": "9954e453fdc8585654f0db8582fcd8ac39bc6d87", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 610, "license_type": "permissive", "max_line_length": 41, "num_lines": 42, "path": "/data_mining/apriori/apriori/test.cpp", "repo_name": "juanjuanShu/codes", "src_encoding": "UTF-8", "text": "\n#include<iostream>\n#include<cstring>\n#include<cstdio>\nusing namespace std;\nint vis[100000], d[10000], ans = 1, n, k;\nchar str[100000];\nvoid dfs(int x, int cnt, int sum)\n{\n\tif (cnt == n - 1)\n\t{\n\t\tprintf(\"%d\\n\", sum);\n\t\treturn;\n\t}\n\tfor (int i = 0; i < k; i++)\n\t{\n\t\tif (vis[i] == 0)\n\t\t{\n\t\t\tvis[i] = 1;\n\t\t\tdfs(d[i], cnt + 1, sum * 10 + d[i]);\n\t\t\tvis[i] = 0;\n\t\t}\n\t}\n\treturn;\n}\nint main()\n{\n\tmemset(vis, 0, sizeof(vis));\n\twhile (cin >> str)\n\t{\n\t\tk = strlen(str);\n\t\tcin >> n;\n\t\tfor (int i = 0; i < k; i++)\n\t\t\td[i] = str[i] - '0';\n\t\tfor (int i = 0; i < k; i++)\n\t\t{\n\t\t\tans = 0;\n\t\t\tvis[i] = 1;\n\t\t\tdfs(d[i], ans, d[i]);\n\t\t\tvis[i] = 0;\n\t\t}\n\n\t}\n\treturn 0;\n}" }, { "alpha_fraction": 0.818411111831665, "alphanum_fraction": 0.818411111831665, "avg_line_length": 36.71428680419922, "blob_id": "bb2852c59845f5863ead6d34b19702f040261406", "content_id": "0a6f9973f1de24592dc63a282cb2ce93c97d01ad", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 793, "license_type": "permissive", "max_line_length": 145, "num_lines": 21, "path": "/data_mining/join_based/MultiResolution.h", "repo_name": "juanjuanShu/codes", "src_encoding": "UTF-8", "text": "#pragma once\n#include \"stdafx.h\"\n#include \"Types.h\"\n\nclass MultiResolution\n{\npublic:\n\tMultiResolution(vector<InstanceType>& instances,double 
min_prev,double cellSize,double distance, map<FeatureType, unsigned int> NumOfInstances);\n\tvoid multiResolutionPruning(ColocationSetType &candidates,int k);\n\nprivate:\n\tdouble _min_prev;\n\tdouble _cellSize;\n\tdouble _distance;\n\tmap<unsigned int,map<ColocationType,MultiResolution_TableInstanceType>> _tableInstances;\n\tmap<CellPositionType,map<FeatureType,vector<InstanceIdType>>> _instances;\n\tmap<FeatureType, unsigned int> _numOfInstances;\n\n\tMultiResolution_ColocationPackage _generateTableInstances(ColocationSetType& candidates, int k);\n\tColocationSetType _selectPrevalentColocations(MultiResolution_ColocationPackage &candidatePackages,int k);\n};\n\n" }, { "alpha_fraction": 0.5253134965896606, "alphanum_fraction": 0.5490013957023621, "avg_line_length": 25.933734893798828, "blob_id": "6e96b7cb8af2544836eef8996c56177f4845ffd1", "content_id": "e48db6ad9d14eb839a0ca78e6d3a12ba3ebd664c", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 4936, "license_type": "permissive", "max_line_length": 122, "num_lines": 166, "path": "/data_mining/1.cpp", "repo_name": "juanjuanShu/codes", "src_encoding": "UTF-8", "text": "// apriori.cpp : This file contains the \"main\" function. Program execution begins and ends here.\n//\n\n// ConsoleApplication1.cpp : This file contains the \"main\" function. Program execution begins and ends here.\n//\n\n#include <iostream>\n#include <set>\n#include <vector>\n#include <map>\n#include<algorithm>\n\n\nusing namespace std;\n\n//Load the dataset\nvector < vector<string>> loadDataset()\n{\n    vector < vector<string> > dataSet;\n\n    vector <string> s1 = { \"I1\",\"I2\",\"I5\" };\n    vector <string> s2 = { \"I2\",\"I4\" };\n    vector <string> s3 = { \"I2\",\"I3\" };\n    vector <string> s4 = { \"I1\",\"I2\",\"I4\" };\n    vector <string> s5 = { \"I1\",\"I3\" };\n    vector <string> s6 = { \"I2\",\"I3\" };\n    vector <string> s7 = { \"I1\",\"I3\" };\n    vector <string> s8 = { \"I1\",\"I3\",\"I2\",\"I5\" };\n    vector <string> s9 = { \"I2\",\"I3\",\"I1\" };\n\n    dataSet.push_back(s1);\n    dataSet.push_back(s2);\n    dataSet.push_back(s3);\n    dataSet.push_back(s4);\n    dataSet.push_back(s5);\n    dataSet.push_back(s6);\n    dataSet.push_back(s7);\n    dataSet.push_back(s8);\n    dataSet.push_back(s9);\n\n    return dataSet;\n}\n\nmap< string, unsigned int > create_C1(vector < vector<string> >& dataSet) {\n    map< string, unsigned int >C1;\n    //Iterators\n    vector < vector<string> >::iterator it_data;\n    vector<string>::iterator it_item;\n\n    for (it_data = dataSet.begin(); it_data != dataSet.end(); it_data++) {\n        for (it_item = (*it_data).begin(); it_item != (*it_data).end(); it_item++) {\n            auto ret = C1.insert({ *it_item,1 });\n            //On insert, since map keys must be unique, if the key was already inserted the second value of the returned pair is false,\n            //meaning this element is already present\n            if (!ret.second)\n                ++ret.first->second;\n        }\n    }\n\n    return C1;\n}\n\nmap< string, unsigned int > create_L1_by_C1(map< string, unsigned int >& C1, unsigned int min_sup_count) {\n    map< string, unsigned int >::iterator it_C1;\n    it_C1 = C1.begin();\n    while (it_C1 != C1.end()) {\n        if (it_C1->second < min_sup_count) {\n            C1.erase(it_C1++);\n        }\n        else {\n            it_C1++;\n        }\n    }\n\n    return C1;\n}\n\nmap<vector<string>, vector<string>> create_Ck(vector < vector<string> >& dataSet, map< string, unsigned int > L, int k) {\n    //void create_Ck(vector < vector<string> >& dataSet, map< string, unsigned int > L, int k) {\n    //Ck map(k-1,k): convenient for self-joins, and also prunes away part of the candidate set (requires Lk-1)\n    //Ck is stored in a hash to obtain the counts\n\n\n    vector<string> Lj;\n    //vector<string>::iterator it_item;\n    vector<string>::iterator it_Ck, it_Ck1;\n\n    map<vector<string>, vector<string>> Ck;\n    //map<vector<string>, vector<string>>::iterator it_map;\n\n    
//Get the key values of the map\n    for (auto& item : L) {\n        Lj.push_back(item.first);\n    }\n    sort(Lj.begin(), Lj.end());\n\n\n    //Pre-prune using the Apriori property\n    //Store by prefix itemset, key: the first k-1 items, value: the k-th item\n    //Prefix-itemset-based join: any two values under the same prefix are joined\n    //Prefix-itemset-based pruning, by the property that all non-empty subsets of a frequent itemset must also be frequent, i.e. the (k - 1)-item subsets of the resulting k-itemset must also be frequent;\n    // check whether the (k - 1)-item value is in L(k-1); if not, delete it\n    if (k == 2) {\n        for (it_Ck = Lj.begin(); it_Ck != Lj.end() - 1; it_Ck++) {\n            for (it_Ck1 = it_Ck + 1; it_Ck1 != Lj.end(); it_Ck1++) {\n                //cout << *it_Ck << \" \" << *it_Ck1 << endl;\n                vector<string> tmp_key;\n                vector<string> tmp_value;\n                tmp_key.push_back(*it_Ck);\n                tmp_value.push_back(*it_Ck1);\n                Ck.insert(map<vector<string>, vector<string>>::value_type(tmp_key, tmp_value));\n            }\n        }\n    }\n    else {\n        //For entries sharing the same map key, combine the values pairwise\n\n\n    }\n\n    return Ck;\n}\n\nvoid generate_Lk(vector < vector<string> >& dataSet, unsigned int min_sup_count) {\n    map< string, unsigned int >C1;\n    map< string, unsigned int >L1;\n    vector< map< string, unsigned int > > L;\n    //Prefix-stored Ck map(k-1 ,k)\n    map<vector<string>, vector<string>> prefix_Ck;\n\n    //Generate C1: map<itemset, support count>\n    C1 = create_C1(dataSet);\n    //L1\n    L1 = create_L1_by_C1(C1, min_sup_count);\n\n    L.push_back(L1);\n\n    // L1 is a map type; generate C2 by self-join and pruning\n    // Compute support counts: 1) generate k-itemsets Tk from the transactions 2) store Lk in an unorderedMap, defaulting to 0;\n    // 3) traverse Tk and look it up in the map to obtain the counts\n\n\n\n    int k = 1;\n    while (L[k - 1].size() > 0) {\n\n\n        prefix_Ck = create_Ck(dataSet, L[k -1], k+1);\n        break;\n        //L1 = generate_Lk_by_Ck(dataSet, C1, min_sup_count);\n\n    }\n\n\n\n}\n\nint main()\n{\n    vector < vector<string> > dataSet;\n    unsigned int min_sup_count = 2;\n\n    dataSet = loadDataset();\n    generate_Lk(dataSet, min_sup_count);\n\n}\n\n" }, { "alpha_fraction": 0.7988165616989136, "alphanum_fraction": 0.7988165616989136, "avg_line_length": 31.70967674255371, "blob_id": "47b50982306460cf274f1d3b774e1bf99ff91e3e", "content_id": "9b0403fc8c41f68e2e7e6aa338fd0cee894084fe", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 1076, "license_type": "permissive", "max_line_length": 97, "num_lines": 31, "path": "/data_mining/join_based/Types.h", "repo_name": "juanjuanShu/codes", "src_encoding": "GB18030", "text": "#pragma once\n#include \"stdafx.h\"\n\n//Instance <instance ID, spatial feature type, location>\nusing InstanceIdType = unsigned int;\nusing FeatureType = unsigned char;\nusing LocationType = pair<double, double>;\nusing InstanceType = tuple<InstanceIdType, FeatureType, LocationType>;\n\n//Row instances and table instances\nusing RowInstanceType = vector<InstanceIdType>;\nusing TableInstanceType = vector<RowInstanceType>;\n\n//co-location patterns\nusing ColocationType = vector<FeatureType>;\n//Candidate patterns, storing only the features: vector<vector<FeatureType>> \nusing ColocationSetType = vector<ColocationType>;\nusing ColocationPackage = map<ColocationType, TableInstanceType>; \n\n//MultiResolution part\nusing CellPositionType = pair<int,int>;\nusing MultiResolution_RowInstanceType = vector<CellPositionType>;\nusing MultiResolution_TableInstanceType = vector<MultiResolution_RowInstanceType>;\nusing MultiResolution_ColocationPackage = map<ColocationType, MultiResolution_TableInstanceType>;\n\n//struct Rule {\n//\tColocationType antecedent;\n//\tColocationType consequent;\n//\tdouble conf;\n//};\n//using RuleType = set<Rule>;\n" }, { "alpha_fraction": 0.7027522921562195, "alphanum_fraction": 0.7137614488601685, "avg_line_length": 34.324073791503906, "blob_id": "79623e17989a8032f8797110eddc021a5b7622b5", "content_id": "a33e280750fc5b8ba39fbbb9311726cfecfd4f1e", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 3945, 
"license_type": "permissive", "max_line_length": 162, "num_lines": 108, "path": "/data_mining/join_based/MultiResolution.cpp", "repo_name": "juanjuanShu/codes", "src_encoding": "GB18030", "text": "#include \"MultiResolution.h\"\n#include \"Common.h\"\n#include <set>\n\nMultiResolution::MultiResolution(vector<InstanceType>& instances, double min_prev, double cellSize,double distance, map<FeatureType, unsigned int> numOfInstances)\n:_min_prev(min_prev),\n_cellSize(cellSize),\n_distance(distance),\n_numOfInstances(numOfInstances){\n\tfor (auto it = instances.begin(); it != instances.end(); it++) {\n\t\tauto instanceId = get<InstanceIdType>(*it);\n\t\tauto feature = get<FeatureType>(*it);\n\t\tauto location = get<LocationType>(*it);\n\n\t\tdouble realX = location.first, realY = location.second;\n\t\tint x = realX / cellSize, y = realY / cellSize;\n\n\t\t_tableInstances[1][{feature}].push_back({ { x,y } });\n\t\t//记录该单元格中的相关元素,反向求解!!!!\n\t\t_instances[{ x, y }][feature].push_back({ instanceId });\n\t}\n}\n\nColocationSetType MultiResolution::_selectPrevalentColocations(MultiResolution_ColocationPackage &candidatePackages, int k) {\n\tColocationSetType prevalence;\n\n\tif (!empty(candidatePackages)) {\n\t\tfor (auto candidatePackage : candidatePackages) {\n\t\t\tColocationType candidate = candidatePackage.first;\n\t\t\tMultiResolution_TableInstanceType tableInstances = candidatePackage.second;\n\t\t\t\n\t\t\t//判断每个candidate参与率是否大于等于_min_prev\n\t\t\tbool isPrevalent = true;\n\t\t\t//每个feature开始统计\n\t\t\tfor (unsigned int i = 0; i < candidate.size(); i++) {\n\t\t\t\tint count = 0;\n\t\t\t\tFeatureType feature = candidate[i];\n\t\t\t\tset<CellPositionType> cellPositionSet;// Record whether the instances in the cell are counted.\n\t\t\t\tfor (auto& rowInstance : tableInstances) {\n\t\t\t\t\tCellPositionType cellPosition = rowInstance[i];\n\t\t\t\t\t//如果找不到,那么插入该元素,并统计一次表格中的元素\n\t\t\t\t\tif (cellPositionSet.find(cellPosition) == cellPositionSet.end()) {\n\t\t\t\t\t\tcellPositionSet.insert(cellPosition);\n\t\t\t\t\t\tcount += (int)_instances[cellPosition][feature].size();\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\tdouble conf = count * 1.0 / _numOfInstances[feature];\n\t\t\t\tif (conf < _min_prev) {\n\t\t\t\t\tisPrevalent = false;\n\t\t\t\t\tbreak;\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif (isPrevalent) {\n\t\t\t\t_tableInstances[k][candidate] = tableInstances;\n\t\t\t\tprevalence.push_back(candidate);\n\t\t\t}\n\t\t}\n\t}\n\t\n\treturn prevalence;\n}\n\nMultiResolution_ColocationPackage MultiResolution::_generateTableInstances(ColocationSetType& candidates, int k) {\n\tMultiResolution_ColocationPackage multiResolution_ColocationPackage;\n\n\tfor (auto candidate : candidates) {\n\t\tColocationType candidate1(candidate.begin(), candidate.end() - 1);\n\t\tColocationType candidate2(candidate.begin(), candidate.end() - 2);\n\t\tcandidate2.push_back(candidate.back());\n\n\t\tMultiResolution_TableInstanceType tableInstance1 = _tableInstances[k - 1][candidate1];\n\t\tMultiResolution_TableInstanceType tableInstance2 = _tableInstances[k - 1][candidate2];\n\n\t\tfor (auto it1 = tableInstance1.begin(); it1 != tableInstance1.end(); it1++) {\n\t\t\tMultiResolution_RowInstanceType& rowInstance1 = *it1;\n\t\t\tfor (auto it2 = tableInstance2.begin(); it2 != tableInstance2.end(); it2++) {\n\t\t\t\tMultiResolution_RowInstanceType& rowInstance2 = *it2;\n\n\t\t\t\tbool canMerge = true;\n\t\t\t\tfor (int idx = 0; idx < k - 2; idx++) {\n\t\t\t\t\tif (rowInstance1[idx] != rowInstance2[idx]) {\n\t\t\t\t\t\tcanMerge = 
false;\n\t\t\t\t\t\tbreak;\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\tCommon* a = new Common(_distance, _cellSize);\n\t\t\t\tif (canMerge) {\n\t\t\t\t\tCellPositionType& cell1 = rowInstance1.back(), & cell2 = rowInstance2.back();\n\t\t\t\t\tif (a->multi_rel(cell1, cell2)) {\n\t\t\t\t\t\tMultiResolution_RowInstanceType newRowInstance(rowInstance1);\n\t\t\t\t\t\tnewRowInstance.push_back(rowInstance2.back());\n\t\t\t\t\t\tmultiResolution_ColocationPackage[candidate].push_back(move(newRowInstance));\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\treturn multiResolution_ColocationPackage;\n}\n\nvoid MultiResolution::multiResolutionPruning(ColocationSetType& candidates, int k) {\n\tMultiResolution_ColocationPackage candidatePackages = _generateTableInstances(candidates, k);\n\tcandidates = _selectPrevalentColocations(candidatePackages,k);\n}\n" }, { "alpha_fraction": 0.7493540048599243, "alphanum_fraction": 0.7596899271011353, "avg_line_length": 17.380952835083008, "blob_id": "32d2ac3b78e16d7d72aa338481e1e9f2535765f6", "content_id": "80c8f58320f4d85535196f8f42b4d6894feae652", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 387, "license_type": "permissive", "max_line_length": 66, "num_lines": 21, "path": "/data_mining/join_based/Common.h", "repo_name": "juanjuanShu/codes", "src_encoding": "UTF-8", "text": "#pragma once\n#include \"stdafx.h\"\n#include \"Types.h\"\n#include \"JoinBase.h\"\n#include <cmath>\n\nclass JoinBase;\n\nclass Common\n{\npublic:\n\tbool isRReachable(LocationType& loc1, LocationType& loc2);\n\tbool multi_rel(CellPositionType &cell1, CellPositionType &cell2);\n\n\tCommon(double distance,double cellResolution);\n\tJoinBase* joinBase;\n\nprivate:\n\tdouble _distance;\n\tdouble _cellResolution;\n};\n\n" }, { "alpha_fraction": 0.6157804727554321, "alphanum_fraction": 0.619210958480835, "avg_line_length": 20.08433723449707, "blob_id": "8a2ce9d88e64b71c6885e3d2b6d4114be774891f", "content_id": "b1569beaeaf29eab9a92b439a0232d193e52560a", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1749, "license_type": "permissive", "max_line_length": 67, "num_lines": 83, "path": "/Apriori/HashTree.h", "repo_name": "juanjuanShu/codes", "src_encoding": "UTF-8", "text": "#ifndef HASHNODE_H_INCLUDED\n#define HASHNODE_H_INCLUDED\n\n#include <unordered_map>\n#include <memory>\n#include <vector>\n\ntemplate<typename T>\nclass HashNode;\n\ntemplate<typename T>\nusing HashTree = HashNode<T>;\n\n// T is key's type.\ntemplate <typename T>\nclass HashNode\n{\npublic:\n HashNode() = default;\n HashNode(const HashNode<T> &) = delete;\n\n void insert(const T &);\n void insert(const std::vector<T>&);\n std::shared_ptr<HashNode<T>> next(const T &);\n bool search(const T &);\n bool search(const std::vector<T> &);\n\nprivate:\n std::unordered_map<T, std::shared_ptr<HashNode<T>>> _hashTable;\n};\n\ntemplate <typename T>\nvoid HashNode<T>::insert(const T &key)\n{\n // The key has inserted into the hash tree, just return.\n if (_hashTable.count(key))\n return;\n\n _hashTable[key] = std::make_shared<HashNode<T>>();\n}\n\ntemplate <typename T>\nvoid HashNode<T>::insert(const std::vector<T> &keys)\n{\n insert(keys[0]);\n std::shared_ptr<HashNode<T>> p = _hashTable[keys[0]];\n for(unsigned int i = 1; i < keys.size(); ++i)\n {\n p->insert(keys[i]);\n p = p->next(keys[i]);\n }\n}\n\ntemplate <typename T>\nstd::shared_ptr<HashNode<T>> HashNode<T>::next(const T &key)\n{\n if (!_hashTable.count(key))\n return 
nullptr;\n return _hashTable[key];\n}\n\ntemplate <typename T>\nbool HashNode<T>::search(const T &key)\n{\n return _hashTable.count(key);\n}\n\ntemplate <typename T>\nbool HashNode<T>::search(const std::vector<T> &keys)\n{\n if(!search(keys[0])) return false;\n std::shared_ptr<HashNode<T>> p = _hashTable[keys[0]];\n\n for(unsigned int i = 1; i < keys.size(); ++i)\n {\n if (!p->search(keys[i]))\n return false;\n p = p->next(keys[i]);\n }\n return true;\n}\n\n#endif" }, { "alpha_fraction": 0.622075080871582, "alphanum_fraction": 0.6353200674057007, "avg_line_length": 26.289155960083008, "blob_id": "4068ec49d78a27898dc2552670efa9b047ea8c46", "content_id": "6c8abc3b8b8daf3495e3bf8986643c42d6f1a9f6", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 2265, "license_type": "permissive", "max_line_length": 94, "num_lines": 83, "path": "/Apriori/Apriori.cpp", "repo_name": "juanjuanShu/codes", "src_encoding": "UTF-8", "text": "#include \"Apriori.h\"\n#include \"HashTree.h\"\n#include <string>\n#include <iostream>\n#include <fstream>\n#include <assert.h>\n#include <sstream>\n#include <algorithm>\n#include <chrono>\n\nusing std::chrono::high_resolution_clock;\nusing std::chrono::milliseconds;\n\nusing namespace std;\n\n\nint main(int argc, char **argv) {\n\tif(argc != 4) {\n\t\tcout << \"Argument number must be 3\" << endl;\n\t\tcout << \"./Apriori supportRank confidenceRank inputPath\" << endl;\n\t\treturn 0;\n\t}\n\tdouble supp = stod(argv[1]), conf = stod(argv[2]);\n\tstring inputPath(argv[3]);\n\n\tifstream fs(inputPath, ios::in);\n\n\tstd::shared_ptr<ItemSetSet<string>> transSet = std::make_shared<ItemSetSet<string>>();\n\tstring line;\n\twhile(getline(fs, line))\n\t{\n\t\tif(line.back() == '\\n') line.pop_back(); // pop back the enter.\n\t\tItemSet<string> itemset;\n\n\t\tstringstream ss(line);\n\t\tstring item;\n\t\twhile(ss >> item)\n\t\t{\n\t\t\titemset.push_back(move(item));\n\t\t}\n\n\t\tsort(itemset.begin(), itemset.end());\n\t\ttransSet->push_back(move(itemset));\n\t}\n\n\t/*\n\tstd::shared_ptr<ItemSetSet<string>> transSet = std::make_shared<ItemSetSet<string>>();\n\ttransSet->push_back(ItemSet<string>{\"I1\", \"I2\", \"I5\"});\n\ttransSet->push_back(ItemSet<string>{\"I2\", \"I4\"});\n\ttransSet->push_back(ItemSet<string>{\"I2\", \"I3\"});\n\ttransSet->push_back(ItemSet<string>{\"I1\", \"I2\", \"I4\"} );\n\ttransSet->push_back(ItemSet<string>{\"I1\", \"I3\"});\n\ttransSet->push_back(ItemSet<string>{\"I2\", \"I3\"});\n\ttransSet->push_back(ItemSet<string>{\"I1\", \"I3\"});\n\ttransSet->push_back(ItemSet<string>{\"I1\", \"I2\", \"I3\", \"I5\"});\n\ttransSet->push_back(ItemSet<string>{\"I1\", \"I2\", \"I3\"});\n\t*/\n\n high_resolution_clock::time_point beginTime = high_resolution_clock::now();\n\n Apriori<string> apriori(transSet, supp, conf);\n auto rules = apriori.execute();\n\n\n high_resolution_clock::time_point endTime = high_resolution_clock::now();\n milliseconds timeInterval = std::chrono::duration_cast<milliseconds>(endTime - beginTime);\n\n for(auto &rule : *rules)\n {\n for(auto &item : rule.antecedent) cout << item << ' ';\n \n cout << \" => \";\n \n for(auto &item : rule.consequent) cout << item << ' ';\n\n\t\tcout << \"with confidence rate : \" << rule.conf;\n\n cout << endl;\n }\n\n std::cout << timeInterval.count() << \"ms\\n\";\n return 0;\n}\n" } ]
14
davidBelanger/protein-embedding-retrieval
https://github.com/davidBelanger/protein-embedding-retrieval
0eecf00bc52437ec833c53a99b398ffb11d7efd8
42aea8016e04154cef43dd9cf5e925df8fee75bb
0d6fbf2fa7458f94016167ae0fde0e83d37a2969
refs/heads/master
2022-12-08T20:29:14.276395
2020-08-25T16:04:28
2020-08-25T16:04:28
290,257,561
1
0
null
2020-08-25T15:49:45
2020-08-07T16:34:23
2020-08-25T14:34:01
null
[ { "alpha_fraction": 0.6388229131698608, "alphanum_fraction": 0.6519832015037537, "avg_line_length": 33.626583099365234, "blob_id": "26470cfbd41a760b9d84fe49ba2b9c62198456c6", "content_id": "8c1e8d02610851e08bc2e6efc7edf4e9cb6d8d3b", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5471, "license_type": "permissive", "max_line_length": 120, "num_lines": 158, "path": "/contextual_lenses/encoders.py", "repo_name": "davidBelanger/protein-embedding-retrieval", "src_encoding": "UTF-8", "text": "\"\"\"Encoder functions\n\nFixed and learnable transformations for embedding sequences.\n\"\"\"\n\n\nimport flax\nfrom flax import nn\n\nimport jax\nfrom jax import lax\nimport jax.nn\nimport jax.numpy as jnp\n\nimport numpy as np\n\nfrom operator import itemgetter\n\n\ndef one_hot_encoder(batch_inds, num_categories):\n \"\"\"Applies one-hot encoding from jax.nn.\"\"\"\n\n one_hots = jax.nn.one_hot(batch_inds, num_classes=num_categories)\n \n return one_hots\n\n\nclass CNN(nn.Module):\n \"\"\"A simple 1D CNN model.\"\"\"\n\n def apply(self, x, n_layers, n_features, n_kernel_sizes):\n \n x = jnp.expand_dims(x, axis=2)\n\n for layer in range(n_layers):\n features = n_features[layer]\n kernel_size = (n_kernel_sizes[layer], 1)\n x = nn.Conv(x, features=features, kernel_size=kernel_size)\n x = nn.relu(x)\n \n x = jnp.squeeze(x, axis=2)\n\n return x\n\n\ndef cnn_one_hot_encoder(batch_inds, num_categories, n_layers, n_features, n_kernel_sizes):\n \"\"\"Applies one-hot encoding followed by 1D CNN.\"\"\"\n\n one_hots = one_hot_encoder(batch_inds, num_categories)\n cnn_one_hots = CNN(one_hots, n_layers, n_features, n_kernel_sizes)\n \n return cnn_one_hots\n\n\n# Positional embeddings\n# Code source: https://github.com/google/flax/blob/aff10f032e892e28a1acf4dd4ee9dcc6cd39a606/examples/wmt/models.py.\ndef sinusoidal_init(max_len=2048,\n min_scale=1.0,\n max_scale=10000.0):\n \"\"\"1D Sinusoidal Position Embedding Initializer.\n Args:\n max_len: maximum possible length for the input.\n min_scale: float: minimum frequency-scale in sine grating.\n max_scale: float: maximum frequency-scale in sine grating.\n Returns:\n output: init function returning `(1, max_len, d_feature)`\n \"\"\"\n\n def init(key, shape, dtype=np.float32):\n \"\"\"Sinusoidal init.\"\"\"\n del key, dtype\n d_feature = shape[-1]\n pe = np.zeros((max_len, d_feature), dtype=np.float32)\n position = np.arange(0, max_len)[:, np.newaxis]\n scale_factor = -np.log(max_scale / min_scale) / (d_feature // 2 - 1)\n div_term = min_scale * np.exp(np.arange(0, d_feature // 2) * scale_factor)\n pe[:, :d_feature // 2] = np.sin(position * div_term)\n pe[:, d_feature // 2: 2 * (d_feature // 2)] = np.cos(position * div_term)\n pe = pe[np.newaxis, :, :] # [1, max_len, d_feature]\n return jnp.array(pe)\n\n return init\n\n\nclass AddPositionEmbs(nn.Module):\n \"\"\"Adds (optionally learned) positional embeddings to the inputs.\"\"\"\n\n def apply(self,\n inputs,\n inputs_positions=None,\n max_len=512,\n posemb_init=None,\n cache=None):\n \"\"\"Applies AddPositionEmbs module.\n By default this layer uses a fixed sinusoidal embedding table. 
If a\n learned position embedding is desired, pass an initializer to\n posemb_init.\n Args:\n inputs: input data.\n inputs_positions: input position indices for packed sequences.\n max_len: maximum possible length for the input.\n posemb_init: positional embedding initializer, if None, then use a\n fixed (non-learned) sinusoidal embedding table.\n cache: flax attention cache for fast decoding.\n Returns:\n output: `(bs, timesteps, in_dim)`\n \"\"\"\n # inputs.shape is (batch_size, seq_len, emb_dim)\n assert inputs.ndim == 3, ('Number of dimensions should be 3,'\n ' but it is: %d' % inputs.ndim)\n length = inputs.shape[1]\n pos_emb_shape = (1, max_len, inputs.shape[-1])\n if posemb_init is None:\n # Use a fixed (non-learned) sinusoidal position embedding.\n pos_embedding = sinusoidal_init(\n max_len=max_len)(None, pos_emb_shape, None)\n else:\n pos_embedding = self.param('pos_embedding', pos_emb_shape, posemb_init)\n pe = pos_embedding[:, :length, :]\n # We abuse the same attention Cache mechanism to run positional embeddings\n # in fast predict mode. We could use state variables instead, but this\n # simplifies invocation with a single top-level cache context manager.\n # We only use the cache's position index for tracking decoding position.\n if cache:\n if self.is_initializing():\n cache.store(lambda: (4, (1, 1)))\n else:\n cache_entry = cache.retrieve(None)\n i = cache_entry.i\n cache.store(cache_entry.replace(i=cache_entry.i + 1))\n _, _, df = pos_embedding.shape\n pe = lax.dynamic_slice(pos_embedding,\n jnp.array((0, i, 0)),\n jnp.array((1, 1, df)))\n if inputs_positions is None:\n # normal unpacked case:\n return inputs + pe\n else:\n # for packed data we need to use known position indices:\n return inputs + jnp.take(pe[0], inputs_positions, axis=0)\n\n\ndef one_hot_pos_emb_encoder(batch_inds, num_categories, max_len, posemb_init):\n \"\"\"Applies one-hot encoding with positional embeddings.\"\"\"\n \n one_hots = jax.nn.one_hot(batch_inds, num_classes=num_categories)\n one_hots_pos_emb = AddPositionEmbs(one_hots, max_len=max_len, posemb_init=posemb_init)\n \n return one_hots_pos_emb\n\n\ndef cnn_one_hot_pos_emb_encoder(batch_inds, num_categories, n_layers, n_features, n_kernel_sizes, max_len, posemb_init):\n \"\"\"Applies one-hot encoding with positional embeddings followed by CNN.\"\"\"\n\n one_hots_pos_emb = one_hot_pos_emb_encoder(batch_inds, num_categories, max_len=max_len, posemb_init=posemb_init)\n cnn_one_hots_pos_emb = CNN(one_hots_pos_emb, n_layers, n_features, n_kernel_sizes)\n \n return cnn_one_hots_pos_emb\n" }, { "alpha_fraction": 0.6452702879905701, "alphanum_fraction": 0.6452702879905701, "avg_line_length": 23.66666603088379, "blob_id": "264b023cabb400fca97ce44af005219caa89a393", "content_id": "2e4ace6379cbabc56560cf93ae79bba7fdbd79b4", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 592, "license_type": "permissive", "max_line_length": 63, "num_lines": 24, "path": "/contextual_lenses/parser.py", "repo_name": "davidBelanger/protein-embedding-retrieval", "src_encoding": "UTF-8", "text": "\"\"\"Function to parse command line arguments.\"\"\"\n\n\nimport argparse\n\n\ndef parse_args():\n \"\"\"Uses argparse module to parse command line arguments.\"\"\"\n \n parser = argparse.ArgumentParser()\n \n parser.add_argument('--tpu_name')\n parser.add_argument('--save_dir')\n parser.add_argument('--restore_dir')\n parser.add_argument('--use_pmap', action='store_true')\n\n args = parser.parse_args()\n \n 
tpu_name = args.tpu_name\n save_dir = args.save_dir\n restore_dir = args.restore_dir\n use_pmap = args.use_pmap\n \n return tpu_name, save_dir, restore_dir, use_pmap\n" }, { "alpha_fraction": 0.6057619452476501, "alphanum_fraction": 0.6097422242164612, "avg_line_length": 32.60509490966797, "blob_id": "5fb3b4cd80fbd6cc116f45d3a4def7347f218d21", "content_id": "e28bfa444c2772a4bec92d1c502f4b6152201b4f", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5276, "license_type": "permissive", "max_line_length": 122, "num_lines": 157, "path": "/contextual_lenses/train_utils.py", "repo_name": "davidBelanger/protein-embedding-retrieval", "src_encoding": "UTF-8", "text": "\"\"\"Train utils\n\nGeneral tools for instantiating and training models.\n\"\"\"\n\n\nimport flax\nfrom flax import nn\nfrom flax import optim\nfrom flax.training import checkpoints\nfrom flax.training import common_utils\n\nimport jax\nfrom jax import random\nimport jax.nn\nimport jax.numpy as jnp\n\nimport tensorflow as tf\n\nimport numpy as np\n\nimport functools\n\n\n\n# Data batching.\ndef create_data_iterator(df, input_col, output_col, batch_size, epochs=1, buffer_size=None, seed=0, drop_remainder=False):\n\n if buffer_size is None:\n buffer_size = len(df)\n\n inputs = list(df[input_col].values)\n inputs = tf.data.Dataset.from_tensor_slices(inputs)\n\n outputs = df[output_col].values\n outputs = tf.data.Dataset.from_tensor_slices(outputs)\n\n batches = tf.data.Dataset.zip((inputs, outputs)).shuffle(buffer_size=buffer_size, seed=seed)\n batches = batches.repeat(epochs).batch(batch_size=batch_size, drop_remainder=drop_remainder).as_numpy_iterator()\n\n return batches\n\n\ndef create_optimizer(model, learning_rate, weight_decay):\n \"\"\"Instantiates Adam optimizer.\"\"\"\n\n optimizer_def = optim.Adam(learning_rate=learning_rate, weight_decay=weight_decay)\n optimizer = optimizer_def.create(model)\n \n return optimizer\n\n\[email protected](jax.jit, static_argnums=(3, 4))\ndef train_step(optimizer, X, Y, loss_fn, loss_fn_kwargs):\n \"\"\"Trains model (optimizer.target) using specified loss function.\"\"\"\n\n def compute_loss_fn(model, X, Y, loss_fn, loss_fn_kwargs):\n Y_hat = model(X)\n loss = loss_fn(Y, Y_hat, **loss_fn_kwargs)\n return loss\n \n grad_fn = jax.value_and_grad(compute_loss_fn)\n _, grad = grad_fn(optimizer.target, X, Y, loss_fn, loss_fn_kwargs)\n optimizer = optimizer.apply_gradient(grad)\n \n return optimizer\n\n\ndef get_p_train_step():\n \"\"\"Wraps train_step with jax.pmap.\"\"\"\n \n p_train_step = jax.pmap(train_step, axis_name='batch', static_broadcasted_argnums=(3, 4))\n \n return p_train_step\n\n\ndef train(model, train_data, loss_fn, loss_fn_kwargs, learning_rate=1e-4, weight_decay=0.1,\n restore_dir=None, save_dir=None, use_pmap=False):\n \"\"\"Instantiates optimizer, applies train_step/p_train_step over training data.\"\"\" \n \n optimizer = create_optimizer(model, learning_rate=learning_rate, weight_decay=weight_decay)\n\n if restore_dir is not None:\n optimizer = checkpoints.restore_checkpoint(ckpt_dir=restore_dir, target=optimizer)\n\n if use_pmap:\n p_train_step = get_p_train_step()\n optimizer = optimizer.replicate()\n\n for batch in iter(train_data):\n X, Y = batch\n X, Y = common_utils.shard(X), common_utils.shard(Y)\n optimizer = p_train_step(optimizer, X, Y, loss_fn, loss_fn_kwargs)\n\n optimizer = optimizer.unreplicate()\n \n else: \n for batch in iter(train_data):\n X, Y = batch\n optimizer = train_step(optimizer, X, Y, 
loss_fn, loss_fn_kwargs)\n \n if save_dir is not None:\n checkpoints.save_checkpoint(ckpt_dir=save_dir, target=optimizer, step=optimizer.state.step)\n\n return optimizer\n\n\nclass RepresentationModel(nn.Module):\n\n def apply(self, x, encoder_fn, encoder_fn_kwargs, reduce_fn, reduce_fn_kwargs,\n num_categories, output_features, embed=False):\n \"\"\"Computes padding mask, encodes indices using embeddings, \n applies lensing operation, predicts scalar value.\n \"\"\"\n\n padding_mask = jnp.expand_dims(jnp.where(x < num_categories-1, 1, 0), axis=2)\n\n x = encoder_fn(x, num_categories=num_categories, **encoder_fn_kwargs)\n\n rep = reduce_fn(x, padding_mask=padding_mask, **reduce_fn_kwargs)\n\n if embed:\n return rep\n \n out = nn.Dense(rep,\n output_features,\n kernel_init=nn.initializers.xavier_uniform(),\n bias_init=nn.initializers.normal(stddev=1e-6)) \n \n return out\n\n\ndef create_representation_model(encoder_fn, encoder_fn_kwargs, reduce_fn, reduce_fn_kwargs,\n num_categories, output_features, embed=False, key=random.PRNGKey(0)):\n \"\"\"Instantiates a RepresentationModel object.\"\"\"\n\n module = RepresentationModel.partial(encoder_fn=encoder_fn,\n encoder_fn_kwargs=encoder_fn_kwargs, \n reduce_fn=reduce_fn,\n reduce_fn_kwargs=reduce_fn_kwargs,\n num_categories=num_categories,\n output_features=output_features,\n embed=embed)\n \n _, initial_params = RepresentationModel.init_by_shape(key,\n input_specs=[((1, 1), jnp.float32)],\n encoder_fn=encoder_fn,\n encoder_fn_kwargs=encoder_fn_kwargs,\n reduce_fn=reduce_fn,\n reduce_fn_kwargs=reduce_fn_kwargs,\n num_categories=num_categories,\n output_features=output_features,\n embed=embed)\n \n model = nn.Model(module, initial_params)\n \n return model\n" } ]
3
Ddimitrako/Alexa-Skill-kit-for-Extron-Control-System
https://github.com/Ddimitrako/Alexa-Skill-kit-for-Extron-Control-System
847976ba59e110e9a275c88e6e1466c0449cf313
8f34aba2dbfb27d5872c4cf34ea0007918c00b56
936983e79b430b6aa6cea7d9a29ecc3041e47190
refs/heads/main
2023-02-05T22:49:41.570009
2020-12-31T17:38:19
2020-12-31T17:38:19
305,153,907
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7707924246788025, "alphanum_fraction": 0.7982972860336304, "avg_line_length": 55.55555725097656, "blob_id": "1d9f13ce9a16caddc75ae676e26a027f1b0d7dcc", "content_id": "717a75dbaaa9fa7ac601c612b7bd294327916091", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1527, "license_type": "no_license", "max_line_length": 332, "num_lines": 27, "path": "/README.md", "repo_name": "Ddimitrako/Alexa-Skill-kit-for-Extron-Control-System", "src_encoding": "UTF-8", "text": "# Alexa-Skill-kit-for-Extron-Control-System\n\nThe purpose of this projects is to let video conference room users to control the basic room function with voice commands.\nOur room is based on Extron control processor IPCP PRO 350 and the Smart speaker with Alexa is the Echo Dot (3rd Gen).\nSo in this project both Amazon Alexa and Extron Controller where programmed to cooperate.\nFistly i used alexa developer console to create a new skill that contains all the key words that are related with the room fuctions. \nThe second step was to create a new function in the Amazon AWS Lambda with Python. This function receives the incoming json output from the AI voice recognition \nand then send the appropriate comand to the controller. \nFinally the controller receives the command throught a web socket and then runs the deddicated function for the room automation sequence.\n\n# How it works\nA simple explanation:\nWhen alexa hears your command it send your voice to the cloud, where it processes it and result a json file with all the words contained in the sentence you pronounced. Then it sent it to alexa skill and compare it with the saved intents i have created. If data are matched then the existed code in Aws lambda start the execution. 
\n \n# Images\nEcho Dot Device\n![download](https://user-images.githubusercontent.com/73073984/103212731-4c1bdc80-4914-11eb-80fb-ec481f34b887.jpg)\n\nTested room image\n\n![](images/room.jpg) \n\nServices Diagram\n![](images/CloudServices.jpg)\n\n# Check it on YouTube\nLink: https://youtu.be/hlUI-A3MYXQ\n" }, { "alpha_fraction": 0.6214141845703125, "alphanum_fraction": 0.6256356239318848, "avg_line_length": 37.33207702636719, "blob_id": "0e4ffbb7b7365ddb2cbb86018a8a5fef8d0a4b55", "content_id": "11a1eae1c1139328cd12559067752a9ca62a2ef0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 10423, "license_type": "no_license", "max_line_length": 120, "num_lines": 265, "path": "/Alexa_Extron_AWS_Application.py", "repo_name": "Ddimitrako/Alexa-Skill-kit-for-Extron-Control-System", "src_encoding": "UTF-8", "text": "\r\nfrom __future__ import print_function\r\nimport socket\r\nimport logging\r\nimport json\r\nimport sys\r\n\r\n \r\n# --------------- Helpers that build all of the responses ----------------------\r\n\r\ndef build_speechlet_response(title, output, reprompt_text, should_end_session):\r\n    return {\r\n        'outputSpeech': {\r\n            'type': 'PlainText',\r\n            'text': output\r\n        },\r\n        'card': {\r\n            'type': 'Simple',\r\n            'title': \"SessionSpeechlet - \" + title,\r\n            'content': \"SessionSpeechlet - \" + output\r\n        },\r\n        'reprompt': {\r\n            'outputSpeech': {\r\n                'type': 'PlainText',\r\n                'text': reprompt_text\r\n            }\r\n        },\r\n        'shouldEndSession': should_end_session\r\n    }\r\n\r\ndef build_response(session_attributes, speechlet_response):\r\n    return {\r\n        'version': '1.0',\r\n        'sessionAttributes': session_attributes,\r\n        'response': speechlet_response\r\n    }\r\n\r\ndef extronSend(myCommand):\r\n    s = socket.socket()\r\n    host = '62.103.65.43' # Web or IP address of the Extron Control Processor (in String Format)\r\n    port = 4001 # port to listen on (as an Integer)\r\n    s.connect((host, port))\r\n    s.send(myCommand)\r\n    s.close()\r\n    '''s = socket.socket()\r\n    host = '62.103.65.43' # Web or IP address of the Extron Control Processor (in String Format)\r\n    port = 5000 # port to listen on (as an Integer)\r\n    s.connect((host, port))\r\n    s.send(myCommand)\r\n    #extronResponse = (s.recv(1024)).decode(\"utf-8\") \r\n    s.close()'''\r\n\r\n\r\n# --------------- Functions that control the skill's behavior ------------------\r\ndef get_AudioCall_response(intent):\r\n    session_attributes = {}\r\n    card_title = \"AudioCall\"\r\n    print(intent)\r\n    speech_output = \"You called \" +intent['slots']['number']['value']\r\n    extronSend(bytes(\"AudioCall: \"+intent['slots']['number']['value'],'utf-8'))\r\n    reprompt_text = \" \"\r\n    should_end_session = False\r\n    return build_response(session_attributes, build_speechlet_response(\r\n        card_title, speech_output, reprompt_text, should_end_session))\r\n\r\n\r\n\r\n\r\ndef get_inputOutputIntent_response(intent): \r\n    session_attributes = {}\r\n    card_title = \"inputOutputIntent\"\r\n    \r\n    speech_output = \" You send {0} to {1} \".format(intent['slots']['input']['value'],intent['slots']['output']['value'])\r\n    extronSend(bytes(\"Matrix: \"+intent['slots']['input']['value']+\" \"+intent['slots']['output']['value'], 'utf-8'))\r\n    \r\n    reprompt_text = \" \"\r\n    should_end_session = False\r\n    return build_response(session_attributes, build_speechlet_response(\r\n        card_title, speech_output, reprompt_text, should_end_session))\r\n\r\ndef get_lightsIntent_response(intent): \r\n    session_attributes = {}\r\n    card_title = \"lightsIntent\"\r\n    
print(intent['slots']['lightsmode']['value'])\r\n speech_output = \"You set lights {0}\" .format(intent['slots']['lightsmode']['value'])\r\n extronSend(bytes(\"Lights: \" + intent['slots']['lightsmode']['value'], 'utf-8'))\r\n reprompt_text = \" \"\r\n should_end_session = False\r\n return build_response(session_attributes, build_speechlet_response(\r\n card_title, speech_output, reprompt_text, should_end_session))\r\n\r\n\r\ndef get_shadesIntent_response(intent): \r\n session_attributes = {}\r\n card_title = \"shadesIntent\"\r\n print(intent)\r\n speech_output = \"You set shades {0}\" .format(intent['slots']['mode']['value'])\r\n extronSend(bytes(\"Shades: \" + intent['slots']['mode']['value'], 'utf-8'))\r\n reprompt_text = \" \"\r\n should_end_session = False\r\n return build_response(session_attributes, build_speechlet_response(\r\n card_title, speech_output, reprompt_text, should_end_session))\r\n\r\ndef get_volumeIntent_response(intent): \r\n session_attributes = {}\r\n card_title = \"volumeIntent\"\r\n speech_output = \"Volume command received\"\r\n #print(intent)\r\n extronSend(bytes(\"Volume: \" + intent['slots']['volumemode']['value'], 'utf-8'))\r\n reprompt_text = \" \"\r\n should_end_session = False\r\n return build_response(session_attributes, build_speechlet_response(\r\n card_title, speech_output, reprompt_text, should_end_session))\r\n\r\ndef get_InputPresetIntent_response(intent): \r\n session_attributes = {}\r\n card_title = \"InputPresetIntent\"\r\n speech_output = \"You selected \" + intent['slots']['Input']['value']\r\n extronSend(bytes(\"Preset: \"+ intent['slots']['Input']['value'], 'utf-8'))\r\n reprompt_text = \" \"\r\n should_end_session = False\r\n return build_response(session_attributes, build_speechlet_response(\r\n card_title, speech_output, reprompt_text, should_end_session)) \r\n\r\ndef get_monitorsIntent_response(intent): \r\n session_attributes = {}\r\n card_title = \"monitorsIntent\"\r\n speech_output = \"You set monitors \" + intent['slots']['monitors_states']['value']\r\n extronSend(bytes(\"Monitors \" + intent['slots']['monitors_states']['value'], 'utf-8'))\r\n reprompt_text = \" \"\r\n should_end_session = False\r\n return build_response(session_attributes, build_speechlet_response(\r\n card_title, speech_output, reprompt_text, should_end_session))\r\n\r\ndef get_test_response(): \r\n session_attributes = {}\r\n card_title = \"Test\"\r\n speech_output = \"This is a test message for debugging\"\r\n extronSend(bytes(\"test\", 'utf-8'))\r\n reprompt_text = \" \"\r\n should_end_session = False\r\n return build_response(session_attributes, build_speechlet_response(\r\n card_title, speech_output, reprompt_text, should_end_session))\r\n\r\n\r\n\r\ndef get_bossIntent_response(): \r\n session_attributes = {}\r\n card_title = \"BossIntent\"\r\n speech_output = \"You are the Boss Dimitris\"\r\n reprompt_text = \" \"\r\n should_end_session = False\r\n return build_response(session_attributes, build_speechlet_response(\r\n card_title, speech_output, reprompt_text, should_end_session))\r\n\r\ndef get_welcome_response():\r\n \"\"\" If we wanted to initialize the session to have some attributes we could\r\n add those here \"\"\" \r\n extronSend('Welcome'.encode())\r\n session_attributes = {}\r\n card_title = \"Welcome\"\r\n speech_output = \"Welcome to the extron control System!\"\r\n # If the user either does not reply to the welcome message or says something\r\n # that is not understood, they will be prompted again with this text.\r\n reprompt_text = \" \"\r\n 
should_end_session = False\r\n \r\n return build_response(session_attributes, build_speechlet_response(\r\n card_title, speech_output, reprompt_text, should_end_session))\r\n\r\n\r\ndef handle_session_end_request():\r\n card_title = \"Session Ended\"\r\n speech_output = \"Thank you for trying the Alexa Skills Kit sample.\" \\\r\n \"Have a nice day!\"\r\n # Setting this to true ends the session and exits the skill.\r\n should_end_session = True\r\n return build_response({}, build_speechlet_response(\r\n card_title, speech_output, None, should_end_session))\r\n\r\n# --------------- Events ------------------\r\n\r\ndef on_session_started(session_started_request, session):\r\n \"\"\" Called when the session starts.\r\n One possible use of this function is to initialize specific \r\n variables from a previous state stored in an external database\r\n \"\"\"\r\n # Add additional code here as needed\r\n pass\r\n\r\ndef on_launch(launch_request, session):\r\n \"\"\" Called when the user launches the skill without specifying what they\r\n want\r\n \"\"\"\r\n # Dispatch to your skill's launch message\r\n return get_welcome_response()\r\n\r\ndef on_intent(intent_request, session):\r\n \"\"\" Called when the user specifies an intent for this skill \"\"\"\r\n\r\n intent = intent_request['intent']\r\n intent_name = intent_request['intent']['name']\r\n\r\n # Dispatch to your skill's intent handlers\r\n if intent_name == \"test\":\r\n return get_test_response()\r\n elif intent_name==\"inputoutputIntent\":\r\n return get_inputOutputIntent_response(intent)\r\n elif intent_name==\"lightsIntent\":\r\n return get_lightsIntent_response(intent)\r\n elif intent_name==\"shadesIntent\":\r\n return get_shadesIntent_response(intent)\r\n elif intent_name==\"volumeIntent\":\r\n return get_volumeIntent_response(intent)\r\n elif intent_name==\"InputPresetIntent\":\r\n return get_InputPresetIntent_response(intent)\r\n elif intent_name==\"monitorsIntent\":\r\n return get_monitorsIntent_response(intent)\r\n elif intent_name==\"bossIntent\":\r\n return get_bossIntent_response()\r\n elif intent_name==\"AudioCall\":\r\n return get_AudioCall_response(intent)\r\n elif intent_name == \"AMAZON.HelpIntent\":\r\n return get_welcome_response()\r\n elif intent_name == \"AMAZON.CancelIntent\" or intent_name == \"AMAZON.StopIntent\":\r\n return handle_session_end_request()\r\n else:\r\n raise ValueError(\"Invalid intent\")\r\n\r\n\r\ndef on_session_ended(session_ended_request, session):\r\n \"\"\" Called when the user ends the session.\r\n Is not called when the skill returns should_end_session=true\r\n \"\"\"\r\n print(\"on_session_ended requestId=\" + session_ended_request['requestId'] +\r\n \", sessionId=\" + session['sessionId'])\r\n # add cleanup logic here\r\n\r\n\r\n# --------------- Main handler ------------------\r\n\r\ndef lambda_handler(event, context):\r\n \"\"\" Route the incoming request based on type (LaunchRequest, IntentRequest,\r\n etc.) 
The JSON body of the request is provided in the event parameter.\r\n \"\"\"\r\n print(\"Incoming request...\")\r\n\r\n \"\"\"\r\n Uncomment this if statement and populate with your skill's application ID to\r\n prevent someone else from configuring a skill that sends requests to this\r\n function.\r\n \"\"\"\r\n # if (event['session']['application']['applicationId'] !=\r\n # \"amzn1.echo-sdk-ams.app.[unique-value-here]\"):\r\n # raise ValueError(\"Invalid Application ID\")\r\n\r\n if event['session']['new']:\r\n on_session_started({'requestId': event['request']['requestId']},\r\n event['session'])\r\n\r\n if event['request']['type'] == \"LaunchRequest\":\r\n return on_launch(event['request'], event['session'])\r\n elif event['request']['type'] == \"IntentRequest\":\r\n return on_intent(event['request'], event['session'])\r\n elif event['request']['type'] == \"SessionEndedRequest\":\r\n return on_session_ended(event['request'], event['session'])" }, { "alpha_fraction": 0.48105502128601074, "alphanum_fraction": 0.49310484528541565, "avg_line_length": 39.266666412353516, "blob_id": "23d42bd70473c1c87995c9b222e8dce23823da89", "content_id": "90bd2433e7393bf89dddd6774b44b58c610a4d09", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7469, "license_type": "no_license", "max_line_length": 97, "num_lines": 180, "path": "/Alexa.py", "repo_name": "Ddimitrako/Alexa-Skill-kit-for-Extron-Control-System", "src_encoding": "UTF-8", "text": "## Begin ControlScript Import --------------------------------------------------\r\nfrom extronlib import event, Version\r\nfrom extronlib.device import ProcessorDevice, UIDevice\r\nfrom extronlib.interface import EthernetClientInterface, \\\r\n EthernetServerInterface, SerialInterface, IRInterface, RelayInterface, \\\r\n ContactInterface, DigitalIOInterface, FlexIOInterface, SWPowerInterface, \\\r\n VolumeInterface\r\nfrom extronlib.ui import Button, Knob, Label, Level\r\nfrom extronlib.system import Clock, MESet, Wait,Timer\r\nfrom extronlib.interface import EthernetServerInterfaceEx\r\nfrom Video_Switching import Preset_Buttons,SecondBtnList,Update_Matrix\r\nfrom ON_OFF import LightButtonHandler,Projector_Power_ON,Projector_Power_OFF,System_ON,System_OFF\r\nfrom ON_OFF import Projector_ON,Projector_OFF,AllOn,AllOff,LightingSystem\r\nimport re\r\nimport json\r\nfrom Audio_Control import Set_Volume,Level_Mute_Unmute,Tesira\r\n\r\nserv = EthernetServerInterfaceEx(4001, 'TCP')\r\nserv.StartListen() \r\nif serv.StartListen() != 'Listening':\r\n print('Port unavailable: check firewall / port number')\r\n\r\n\r\nAlexaFeedback={\r\n'System_ON':'on',\r\n'System_OFF':'off',\r\n\r\n'Projector_ON':'Monitors on',\r\n'Projector_OFF':'Monitors off',\r\n\r\n'AllShadesStop':'Shades: stop',\r\n'AllShadesUp':'Shades: on',\r\n'AllShadesDown':'Shades: off',\r\n\r\n'AllOn':'Lights: on',\r\n'AllOff':'Lights: off',\r\n'WarmLights':'Lights: warm',\r\n'ColdLights':'Lights: cold',\r\n\r\n'Preset' :'Preset:',\r\n'PC' :'Preset: PC',\r\n'ClickShare':'Preset: click',\r\n'HDMI' :'Preset: HDMI',\r\n'TV' :'Preset: TV',\r\n'Cisco' :'Preset: Cisco',\r\n\r\n'Matrix' :'Matrix:',\r\n'PC' :'PC',\r\n'ClickShare':'click',\r\n'HDMI' :'HDMI',\r\n'TV' :'TV',\r\n'Cisco' :'Cisco',\r\n}\r\n\r\nAlexaSynonims={\r\n'leftSynonim':['left','one','1'],\r\n'rightSynonim':['right','two','2','second','write'],\r\n'bothSynonim':['both','one and two','1 and 2','right and left'],\r\n}\r\n\r\nVolumeMode=['mute','unmute','max','min']\r\n \r\n 
\r\n\"\"\"*****************MAtrix*********************\"\"\" \r\n@event(serv, 'ReceiveData')\r\ndef HandleReceiveMatrix(client, data):\r\n print(data) \r\n data=data.decode('UTF-8') \r\n if 'Lights: on' in data:\r\n LightingSystem.Set('RelayControl','On', {'Address':103}) #103=allon\r\n LightingSystem.Set('RelayControl','Off', {'Address':109}) #front face off\r\n elif 'Lights: off' in data: \r\n LightingSystem.Set('RelayControl','Off', {'Address':103}) \r\n elif 'Monitors on' in data:\r\n Projector_Power_ON()\r\n elif 'Monitors off' in data:\r\n Projector_Power_OFF() \r\n elif 'Shades: stop' in data:\r\n LightingSystem.Set('RelayControl','On', {'Address':302}) \r\n LightingSystem.Set('RelayControl','On', {'Address':303}) \r\n elif 'Shades: on' in data:\r\n LightingSystem.Set('RelayControl','Off', {'Address':301}) \r\n LightingSystem.Set('RelayControl','Off', {'Address':300}) \r\n elif 'Shades: off' in data:\r\n LightingSystem.Set('RelayControl','On', {'Address':300}) \r\n LightingSystem.Set('RelayControl','On', {'Address':301}) \r\n \r\n if 'Volume:' in data: \r\n if ' mute' in data:\r\n Level_Mute_Unmute(True)\r\n print('Volume mute Done')\r\n elif 'unmute' in data:\r\n Level_Mute_Unmute(False)\r\n print('Volume unmute Done')\r\n elif 'max' in data:\r\n Set_Volume(0)\r\n print('Volume max Done')\r\n elif 'Min' in data: \r\n Set_Volume(-50)\r\n print('Volume Min Done')\r\n else:\r\n matchVolumeValue = re.findall(r'[0-9]+', data)\r\n print(matchVolumeValue)\r\n #Set_Volume(int(matchVolumeValue[0]/2)-50)\r\n Volume=int(matchVolumeValue[0])/2-50\r\n Tesira.Set('LevelControl', Volume, {'Instance Tag': 'Level7', 'Channel': '1'})\r\n\r\n\r\n if 'Preset:' in data:\r\n if 'clickshare' in data:\r\n Preset_Buttons(SecondBtnList[0],\"Pressed\") \r\n print('Preset Clicshare selected ') \r\n elif 'Cisco' in data:\r\n Preset_Buttons(SecondBtnList[2],\"Pressed\") \r\n print('Preset Cisco selected ') \r\n elif 'PC' in data:\r\n Preset_Buttons(SecondBtnList[4],\"Pressed\") \r\n print('Preset PC selected ') \r\n elif 'HDMI' in data:\r\n Preset_Buttons(SecondBtnList[5],\"Pressed\") \r\n print('Preset HDMI selected ') \r\n \r\n \r\n if 'Matrix:' in data:\r\n if 'PC' in data: \r\n for key, lista in AlexaSynonims.items(): \r\n for synonim in lista:\r\n if synonim in data:\r\n if key=='leftSynonim':\r\n Update_Matrix(1,1)\r\n print(\"pc left screen done\")\r\n elif key=='rightSynonim':\r\n Update_Matrix(1,2)\r\n print(\"pc right screen done\")\r\n elif key=='bothSynonim':\r\n Update_Matrix(1,1)\r\n Update_Matrix(6,2)\r\n print(\"pc both screens done\") \r\n if 'click' in data: \r\n for key, lista in AlexaSynonims.items(): \r\n for synonim in lista:\r\n if synonim in data:\r\n if key=='leftSynonim':\r\n Update_Matrix(4,1)\r\n print(\"clickshare left screen done\")\r\n elif key=='rightSynonim':\r\n Update_Matrix(4,2)\r\n print(\"clickshare right screen done\")\r\n elif key=='bothSynonim':\r\n Update_Matrix(4,1)\r\n Update_Matrix(4,2)\r\n print(\"clickshare both screens done\") \r\n if 'Hdmi' in data: \r\n for key, lista in AlexaSynonims.items(): \r\n for synonim in lista:\r\n if synonim in data:\r\n if key=='leftSynonim':\r\n Update_Matrix(2,1)\r\n print(\"Hdmi left screen done\")\r\n elif key=='rightSynonim':\r\n Update_Matrix(2,2)\r\n print(\"Hdmi right screen done\")\r\n elif key=='bothSynonim':\r\n Update_Matrix(2,1)\r\n Update_Matrix(2,2)\r\n print(\"Hdmi both screens done\") \r\n if 'cisco' in data: \r\n for key, lista in AlexaSynonims.items(): \r\n for synonim in lista:\r\n if synonim in data:\r\n if 
key=='leftSynonim':\r\n Update_Matrix(2,1)\r\n print(\"cisco left screen done\")\r\n elif key=='rightSynonim':\r\n Update_Matrix(2,2)\r\n print(\"cisco right screen done\")\r\n elif key=='bothSynonim':\r\n Update_Matrix(2,1)\r\n Update_Matrix(2,2)\r\n print(\"cisco both screens done\") \r\n \r\n\r\n \r\n " } ]
3
Gerokusu/Kayambot
https://github.com/Gerokusu/Kayambot
9a1a6559496699eccbb7fba0438868531e24f808
a49cf7fd16fdc049500ae645784cc671b04edf87
37faa01390523b1a76f72832733ab60d311e9ac3
refs/heads/master
2021-09-12T04:42:53.787810
2018-04-14T12:08:23
2018-04-14T12:08:23
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6386051177978516, "alphanum_fraction": 0.6435364484786987, "avg_line_length": 41.37313461303711, "blob_id": "4dc3e8843f558d863db4361bf840a3b8abc3c5ff", "content_id": "1456f79b6d2d697d58cf877099b77d6ee57baf7d", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2847, "license_type": "permissive", "max_line_length": 118, "num_lines": 67, "path": "/src/funky/__init__.py", "repo_name": "Gerokusu/Kayambot", "src_encoding": "UTF-8", "text": "import os\nimport asyncio\nimport discord\n\n# To add a command, simply create a function 'async def on_COMMAND(bot, message, arguments)'\n# 'bot' is the original bot object\n# 'message' is the original message object\n# 'arguments' is an array containing all the command arguments\n\nPATH_SONGS = \"resources/songs\"\nJSON_MEMORY_KEY_CHANNEL = \"channel\"\nMESSAGE_PLAY = \"Mes moustaches frémissent ! En avant pour {}, volume {} !\"\nMESSAGE_STOP = \"Vous souhaitez un peu de silence, miaou ?\"\nMESSAGE_INVALID_SONG = \"J'ai une grosse bibliothèque, mais je ne trouve pas votre chanson, miaître...\"\nMESSAGE_NO_SONG = \"Vous devez prrréciser une musique, mon chaton.\"\nMESSAGE_NO_CHANNEL = \"Vous devez rejoindre un chat-nal vocal d'abord !\"\nMESSAGE_MEMORY_CHANNEL_SUCCESSFUL = \"Ce canal sera miaoutilisé pour mes futures envolées artistiques !\"\nMESSAGE_MEMORY_CHANNEL_FAILURE = \"Ce chat-nal est déjà mon lieu de travail !\"\n\nasync def on_channel(bot, message, arguments):\n channel = message.channel\n if channel != None:\n if await bot.getmem(JSON_MEMORY_KEY_CHANNEL) != channel.id:\n await bot.setmem(JSON_MEMORY_KEY_CHANNEL, channel.id)\n await bot.speak(MESSAGE_MEMORY_CHANNEL_SUCCESSFUL);\n else:\n await bot.speak(MESSAGE_MEMORY_CHANNEL_FAILURE);\n\nasync def on_play(bot, message, arguments):\n channel = message.author.voice.voice_channel\n if channel != None:\n if len(arguments) > 0:\n song_title = arguments[0]\n voice = await get_voice(bot, channel)\n if voice != None and not is_playing(bot):\n song_path = bot.get_path(PATH_SONGS + \"/\" + song_title)\n song_volume = int(arguments[1]) if len(arguments) > 1 else 2\n song_volume = song_volume if song_volume <= 100 else 100\n if os.path.isfile(song_path):\n bot.player = voice.create_ffmpeg_player(song_path, options=\"-af volume=\" + str(song_volume / 100))\n bot.player.start()\n await bot.speak(MESSAGE_PLAY.format(song_title, song_volume));\n else:\n await bot.speak(MESSAGE_INVALID_SONG);\n else:\n await bot.speak(MESSAGE_NO_SONG);\n else:\n await bot.speak(MESSAGE_NO_CHANNEL);\n\n\nasync def on_stop(bot, message, arguments):\n if is_playing(bot):\n bot.player.stop()\n await bot.speak(MESSAGE_STOP);\n\ndef is_playing(bot):\n return hasattr(bot, \"player\") and bot.player != None and bot.player.is_playing()\n\nasync def get_voice(bot, channel):\n voice = None\n if channel != None:\n for voice_client in bot.client.voice_clients:\n if voice_client.channel == channel:\n voice = voice_client\n if voice == None:\n voice = await bot.client.join_voice_channel(channel)\n return voice;\n" }, { "alpha_fraction": 0.78125, "alphanum_fraction": 0.78125, "avg_line_length": 10, "blob_id": "0b09a2a15e87669bb7e5bdec4fedadb1b3d75fb6", "content_id": "ddf852ce812aea46864db19d9cfcfe77674a7b47", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 33, "license_type": "permissive", "max_line_length": 20, "num_lines": 3, "path": "/README.md", "repo_name": "Gerokusu/Kayambot", 
"src_encoding": "UTF-8", "text": "# Kayambot\n\nDescription à venir." }, { "alpha_fraction": 0.5627918839454651, "alphanum_fraction": 0.5697976350784302, "avg_line_length": 34.685184478759766, "blob_id": "0e95790f830de9f6e6176f62f8943d96cc8e4535", "content_id": "e318a8c36a99051b2f0ce1aaa7cf7ebf9a85e1d0", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3854, "license_type": "permissive", "max_line_length": 134, "num_lines": 108, "path": "/src/__init__.py", "repo_name": "Gerokusu/Kayambot", "src_encoding": "UTF-8", "text": "import sys\nimport os\nimport random\nimport time\nimport asyncio\nimport importlib\nimport json\nimport discord\nfrom colorama import init as colorama\n\nPATH_SRC = \"/src\"\nPATH_MEMORY_FILE = \"/memory.json\"\nJSON_MEMORY_KEY_CHANNEL = \"channel\"\nMESSAGE_UNDEFINED_BEHAVIOUR = \"Behaviour {} was not found.\"\nMESSAGE_MEMORY_READ_SUCCESSFUL = \"Successfully accessed memory key {} of value {}.\"\nMESSAGE_MEMORY_WRITE_SUCCESSFUL = \"Successfully accessed memory key {} of new value {}.\"\n\ncolorama()\n\nclass Bot:\n\n client = discord.Client()\n behaviour_name = \"\"\n behaviour_lib = None\n behaviour_memory = \"\"\n\n def __init__(self, name, token):\n self.set_behaviour(name)\n if(self.behaviour_lib != None):\n self.set_events()\n self.client.run(token)\n self.client.close();\n\n def set_behaviour(self, name):\n self.behaviour_name = name\n self.behaviour_lib = None\n try:\n self.behaviour_lib = importlib.import_module(name)\n except ModuleNotFoundError:\n self.log_error(MESSAGE_UNDEFINED_BEHAVIOUR, name)\n self.behaviour_memory = self.get_path(PATH_SRC + \"/\" + self.behaviour_name + PATH_MEMORY_FILE)\n if not os.path.isfile(self.behaviour_memory):\n with open(self.behaviour_memory, \"w+\") as file:\n json.dump({}, file)\n\n def set_events(self):\n bot = self\n\n @bot.client.event\n async def on_ready():\n bot.log_ok(\"Ready !\")\n\n @bot.client.event\n async def on_message(message):\n bot.log(\"({}#{}) <{}> {}\".format(message.channel.server.name, message.channel.name, message.author.name, message.content))\n if message.content.startswith(\"!\"):\n words = message.content[1:].lower().split(\" \")\n if len(words) > 0:\n command = \"on_\" + words[0]\n arguments = []\n if len(words) > 1:\n arguments.extend(words[1:])\n if hasattr(bot, command):\n await getattr(bot, command)(bot, message, arguments)\n elif bot.behaviour_lib != None and hasattr(bot.behaviour_lib, command):\n await getattr(bot.behaviour_lib, command)(bot, message, arguments)\n\n def get_path(self, file):\n return os.path.split(os.path.abspath(os.path.realpath(sys.argv[0])))[0] + \"/\" + file\n\n def log(self, message, *args):\n print(\"[{}] {}\".format(time.strftime(\"%y/%m/%d-%H:%M:%S\"), str(message).format(*args)))\n\n def log_ok(self, message, *args):\n self.log(\"\\033[92m\" + message + \"\\033[0m\", *args)\n\n def log_error(self, message, *args):\n self.log(\"\\033[91m\" + message + \"\\033[0m\", *args)\n\n async def speak(self, texts, channel = None):\n text = \"\";\n if type(texts) == type(\"\"):\n text = texts\n elif type(texts) == type([]) and len(texts) > 0:\n text = random.choice(texts)\n if channel == None:\n channel_id = await self.getmem(JSON_MEMORY_KEY_CHANNEL)\n if channel_id != None:\n channel = self.client.get_channel(channel_id)\n await self.client.send_message(channel, text)\n\n async def setmem(self, key, value):\n with open(self.behaviour_memory, \"r+\") as file:\n memory = json.load(file)\n memory[key] = value\n 
file.seek(0)\n            file.truncate()\n            json.dump(memory, file)\n            self.log_ok(MESSAGE_MEMORY_WRITE_SUCCESSFUL, key, value)\n\n    async def getmem(self, key):\n        value = None\n        with open(self.behaviour_memory, \"r\") as file:\n            memory = json.load(file)\n            if key in memory:\n                value = memory[key]\n                self.log_ok(MESSAGE_MEMORY_READ_SUCCESSFUL, key, value)\n        return value\n" }, { "alpha_fraction": 0.6431273818016052, "alphanum_fraction": 0.670740008354187, "avg_line_length": 41.73684310913086, "blob_id": "90a17ac7fc5410237a6e124f862b4f419557ca24", "content_id": "41a6595b312f263edb0ea839f06b374fd104f946", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1586, "license_type": "permissive", "max_line_length": 125, "num_lines": 38, "path": "/src/kayambot/scraper.py", "repo_name": "Gerokusu/Kayambot", "src_encoding": "UTF-8", "text": "import urllib.request\nfrom bs4 import BeautifulSoup\n\nLOG_CONNECTION_SUCCESS = \"Successfully connected to {}\"\nLOG_CONNECTION_FAILURE = \"Could not connect to {}\"\nLOG_DIV_FOUND = \"Successfully found div '{}' at index {} : '{}'\"\nLOG_DIV_NOTFOUND = \"Could not find div '{}' at index {}\"\n\nURL_MONSTER = \"http://mhgen.kiranico.com/fr/monstre/{}\"\nURL_MONSTER_ICON = \"https://grox2006.github.io/Kayambot/resources/images/thumbnails/monster_{}.png\"\nURL_MONSTER_ICON_CONSTANT = \"https://grox2006.github.io/Kayambot/resources/images/thumbnails/icon_monster.png\"\nURL_HEADER = {\n    'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.11',\n    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',\n    'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.3',\n    'Accept-Encoding': 'none',\n    'Accept-Language': 'fr-FR,en;q=0.8',\n    'Connection': 'keep-alive'\n}\n\nasync def get_site(bot, url):\n    parser = None\n    try:\n        parser = BeautifulSoup(urllib.request.urlopen(urllib.request.Request(url, headers=URL_HEADER)).read(), \"lxml\")\n        bot.log_ok(LOG_CONNECTION_SUCCESS, url)\n    except Exception:\n        bot.log_error(LOG_CONNECTION_FAILURE, url)\n    return parser;\n\nasync def get_text(bot, parser, selector, index = 0):\n    result = \"\"\n    response = parser.select(selector)\n    if len(response) > index:\n        result = response[index].getText()\n        bot.log_ok(LOG_DIV_FOUND, selector, index, response[index])\n    else:\n        bot.log_error(LOG_DIV_NOTFOUND, selector, index)\n    return result\n" }, { "alpha_fraction": 0.5560166239738464, "alphanum_fraction": 0.5737313222885132, "avg_line_length": 23.100000381469727, "blob_id": "0512516530e2422a5f5d52ecf22fd456aa21c71", "content_id": "cf1a2db3b269878a5e5e58701010203def50c495", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 241, "license_type": "permissive", "max_line_length": 51, "num_lines": 10, "path": "/src/__main__.py", "repo_name": "Gerokusu/Kayambot", "src_encoding": "UTF-8", "text": "import sys\nfrom __init__ import Bot\n\nMESSAGE_USAGE = \"Usage is python {} [name] [token]\"\n\nif __name__ == \"__main__\":\n    if len(sys.argv) == 3:\n        Bot(sys.argv[1], sys.argv[2])\n    else:\n        print(MESSAGE_USAGE.format(sys.argv[0]))\n" }, { "alpha_fraction": 0.4893617033958435, "alphanum_fraction": 0.7234042286872864, "avg_line_length": 18, "blob_id": "f46227256b9f00fea4558593eb3de2e104dfed62", "content_id": "9a35fac25e9002228dfb498559a1273add495603", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 94, "license_type": 
"permissive", "max_line_length": 21, "num_lines": 5, "path": "/requirements.txt", "repo_name": "Gerokusu/Kayambot", "src_encoding": "UTF-8", "text": "colorama==0.3.9\ndiscord.py==0.16.12\nbeautifulsoup4==4.6.0\ndiscord==0.0.2\nyoutube-dl==2018.3.20" }, { "alpha_fraction": 0.6639510989189148, "alphanum_fraction": 0.670740008354187, "avg_line_length": 45.03125, "blob_id": "188d7046b0065623e2372f0dcb919a00ec76e4aa", "content_id": "82595973c3e4b072dd250bebd1629279812a551f", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1479, "license_type": "permissive", "max_line_length": 108, "num_lines": 32, "path": "/src/kayambot/__init__.py", "repo_name": "Gerokusu/Kayambot", "src_encoding": "UTF-8", "text": "import asyncio\nimport discord\nfrom . import scraper\n\nMESSAGE_UNDEFINED_MONSTER = [\n \"Je ne trouve pas le mon-monstre que vous spécifiez.\",\n \"Vous êtes sûr qu'un mon-monstre possède ce nom, oga ?\",\n \"Je ne me rappelle pas avoir déjà vu ce mon-monstre quelquepart.\"\n]\n\n# To add a command, simply create a function 'async def on_COMMAND(bot, message, arguments)'\n# 'bot' is the original bot object\n# 'message' is the original message object\n# 'arguments' is an array containing all the command arguments\n\nasync def on_monster(bot, message, arguments):\n if len(arguments) > 0:\n monster_id = arguments[0]\n monster_url = scraper.URL_MONSTER.format(monster_id)\n parser = await scraper.get_site(bot, monster_url)\n if parser != None:\n monster_name = await scraper.get_text(bot, parser, \"h3[itemprop='name']\")\n monster_hp = await scraper.get_text(bot, parser, \".card .card-block .lead\", 0) + \"PV\";\n monster_colour = discord.Colour(0x0B3372);\n monster_icon = scraper.URL_MONSTER_ICON.format(monster_id)\n\n embed = discord.Embed(title=\"\", description=monster_hp, colour=monster_colour)\n embed.set_author(name=monster_name, url=monster_url, icon_url=scraper.URL_MONSTER_ICON_CONSTANT)\n embed.set_thumbnail(url=monster_icon)\n await bot.client.send_message(message.channel, \"\", embed=embed);\n else:\n await bot.speak(MESSAGE_UNDEFINED_MONSTER)\n" } ]
7
kkourt/nsview
https://github.com/kkourt/nsview
26af0ee5c2f5c20a8c68ced502b626734759e8d4
9719880183da4920858acb1b167ca35540c88584
9c87e387b58ce8cbcf83a855738bcd873c0bb46e
refs/heads/master
2022-11-22T16:07:52.785956
2020-07-29T19:53:06
2020-07-29T19:53:06
283,151,795
3
0
null
null
null
null
null
[ { "alpha_fraction": 0.5081196427345276, "alphanum_fraction": 0.5108262300491333, "avg_line_length": 38.8863639831543, "blob_id": "1c3e816c24d364ba054bffe5b01c5f016b111ee7", "content_id": "bca727ad3cffe49f2efcf693a08db1e143185fc3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7020, "license_type": "no_license", "max_line_length": 130, "num_lines": 176, "path": "/nsview.py", "repo_name": "kkourt/nsview", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n\nimport json\nimport subprocess as sp\nfrom pprint import pprint\nimport sys\n\nclass Links:\n def __init__(self, d):\n self.links = []\n self.links_by_ifindex = {}\n self.links_by_ifname = {}\n for link in d:\n self.links.append(link)\n ifindex = link[\"ifindex\"]\n self.links_by_ifindex[ifindex] = link\n ifname = link[\"ifname\"]\n self.links_by_ifname[ifname] = link\n\n assert len(self.links) == len(self.links_by_ifindex)\n assert len(self.links) == len(self.links_by_ifname)\n\nclass Namespaces:\n def __init__(self, d):\n self.namespaces = []\n self.namespaces_by_ns = {}\n self.namespaces_by_netnsid = {}\n for ns_info in d[\"namespaces\"]:\n self.namespaces.append(ns_info)\n ns = ns_info[\"ns\"]\n self.namespaces_by_ns[ns] = ns_info\n netnsid = ns_info[\"netnsid\"]\n if netnsid != \"unassigned\":\n self.namespaces_by_netnsid[int(netnsid)] = ns_info\n assert len(self.namespaces) == len(self.namespaces_by_ns)\n # multiple might have an unisnged namespace\n #assert len(self.namespaces) == len(self.namespaces_by_netnsid)\n\n def set_links(self):\n for ns_info in self.namespaces:\n links = get_links(ns_info[\"nsfs\"])\n ns_info[\"links\"] = links\n\n try:\n bpf_progs = get_bpf_net_progs(ns_info[\"nsfs\"])[0]\n except:\n continue\n\n for (ty,progl) in bpf_progs.items():\n for prog in progl:\n link1 = links.links_by_ifname[prog[\"devname\"]]\n link2 = links.links_by_ifindex[prog[\"ifindex\"]]\n assert link1 == link2\n progs = link1.get(\"bpf_progs\", [])\n progs.append({\n \"type\": ty,\n \"kind\": prog[\"kind\"],\n \"name\": prog[\"name\"],\n })\n link1[\"bpf_progs\"] = progs\n\n def set_namespaces(self):\n for ns_info in self.namespaces:\n namespaces = get_namespaces(ns_info)\n ns_info[\"children\"] = namespaces\n\n\n\ndef get_bpf_net_progs(nsfs):\n cmd = \"sudo $(which nsenter) -n%s $(which bpftool) -j net show\" % (nsfs,)\n ip = sp.run(cmd, shell=True, stdout=sp.PIPE, stderr=sp.PIPE)\n if ip.returncode != 0:\n raise RuntimeError(\"cmd: %s failed (%d)\\n%s\" % (cmd, ip.returncode, ip.stderr.decode(\"utf-8\")))\n txt = ip.stdout.decode(\"utf-8\")\n bpf_progs = json.loads(txt)\n return bpf_progs\n\ndef get_links(nsfs):\n cmd = \"sudo $(which nsenter) -n%s $(which ip) -j addr\" % (nsfs,)\n ip = sp.run(cmd, shell=True, stdout=sp.PIPE, stderr=sp.PIPE)\n if ip.returncode != 0:\n raise RuntimeError(\"cmd: %s failed (%d)\\n%s\" % (cmd, ip.returncode, ip.stderr.decode(\"utf-8\")))\n txt = ip.stdout.decode(\"utf-8\")\n links = Links(json.loads(txt))\n return links\n\n\ndef get_namespaces(ns=None):\n if ns is None:\n prefix = \"sudo\"\n else:\n prefix = \"sudo $(which nsenter) -n%s\" % (ns[\"nsfs\"])\n\n cmd = \"%s $(which lsns) --json -t net\" % (prefix,)\n lsns = sp.run(cmd, shell=True, stdout=sp.PIPE, stderr=sp.PIPE)\n if lsns.returncode != 0:\n raise RuntimeError(\"cmd: %s failed (%d)\\n%s\" % (cmd, lsns.returncode, lsns.stderr.decode(\"utf-8\")))\n txt = lsns.stdout.decode(\"utf-8\")\n\n namespaces = Namespaces(json.loads(txt))\n if ns is None:\n namespaces.set_links()\n 
namespaces.set_namespaces()\n\n return namespaces\n\ndef write_dot(namespaces):\n with open(\"nsview.dot\", 'w') as f:\n f.write(\"digraph G {\\n\")\n f.write(\"\\tgraph [ rankdir=\\\"LR\\\" ]\\n\")\n for ns in namespaces.namespaces:\n f.write(\"\\tsubgraph cluster_%s {\\n\" % (ns[\"ns\"],))\n f.write(\"\\t\\tlabel = \\\" namespace %s \\\"\\n\" %(ns[\"ns\"],))\n\n for link in ns[\"links\"].links:\n dotname = \"%s-%s\" % (ns[\"ns\"], link[\"ifindex\"])\n\n records = []\n dotlabel = \"<<table border=\\\"1\\\" cellborder=\\\"0\\\" bgcolor=\\\"gray\\\"> \"\n dotlabel += \"<tr><td port=\\\"name\\\" bgcolor=\\\"black\\\"><font color=\\\"white\\\">%s</font></td></tr>\" % (link[\"ifname\"])\n for ai in link[\"addr_info\"]:\n dotlabel += \"<tr><td align=\\\"left\\\">%s/%s</td></tr>\" % (ai[\"family\"],ai[\"local\"])\n for prog in link.get(\"bpf_progs\", []):\n v = (\"%s-%s-%s\") % (prog[\"type\"], prog[\"kind\"], prog[\"name\"])\n dotlabel += \"<tr><td align=\\\"left\\\">%s</td></tr>\" % (v,)\n dotlabel += \"</table>>\"\n\n f.write(\"\\t\\t\\\"%s\\\" [\\n\" % (dotname,))\n #f.write(\"\\t\\t\\tlabel = \\\"%s\\\"\\n\" % (dotlabel, ))\n f.write(\"\\t\\t\\tlabel = %s\\n\" % (dotlabel, ))\n #f.write(\"\\t\\t\\tshape = record\\n\")\n f.write(\"\\t\\t\\tshape = plaintext\\n\")\n f.write(\"\\t\\t]\\n\")\n\n f.write(\"\\t}\\n\")\n\n existing_pairs = set()\n for src_namespace in namespaces.namespaces:\n for src_dev in src_namespace[\"links\"].links:\n\n # link in the same namespace\n src_link = src_dev.get(\"link\", None)\n # Ignore because it makes the graph unreadable\n src_link = None\n if src_link is not None:\n dst_namespace = src_namespace\n dst_dev = src_namespace[\"links\"].links_by_ifname[src_link]\n dotname_src = \"%s-%s\" % (src_namespace[\"ns\"], src_dev[\"ifindex\"])\n dotname_dst = \"%s-%s\" % (dst_namespace[\"ns\"], dst_dev[\"ifindex\"])\n if (dotname_dst, dotname_src) not in existing_pairs:\n f.write(\"\\t\\\"%s\\\":name -> \\\"%s\\\":name [dir=none, color=green]\\n\" % (dotname_src, dotname_dst))\n existing_pairs.add((dotname_src, dotname_dst))\n continue\n\n src_link_netnsid = src_dev.get(\"link_netnsid\", None)\n src_link_ifidx = src_dev.get(\"link_index\", None)\n if src_link_netnsid is None or src_link_ifidx is None:\n continue\n\n dst_ns = src_namespace[\"children\"].namespaces_by_netnsid[src_link_netnsid][\"ns\"]\n dst_namespace = namespaces.namespaces_by_ns[dst_ns]\n dst_dev = dst_namespace[\"links\"].links_by_ifindex[src_link_ifidx]\n dotname_src = \"%s-%s\" % (src_namespace[\"ns\"], src_dev[\"ifindex\"])\n dotname_dst = \"%s-%s\" % (dst_namespace[\"ns\"], dst_dev[\"ifindex\"])\n if (dotname_dst, dotname_src) not in existing_pairs:\n f.write(\"\\t\\\"%s\\\":name -> \\\"%s\\\":name [dir=none, color=red]\\n\" % (dotname_src, dotname_dst))\n existing_pairs.add((dotname_src, dotname_dst))\n\n f.write(\"}\\n\")\n\ndef main():\n nses = get_namespaces()\n write_dot(nses)\n\nif __name__ == '__main__':\n main()\n" } ]
1
aklira/weatherstation
https://github.com/aklira/weatherstation
bbe8d58342c4fd2dde462553dfd580dca603d040
0cc98bf2700927807111b1f926f1a020fc0e4600
ed22267800f4c63a38950eaa281ae878d6814bbf
refs/heads/master
2020-07-30T09:35:51.831816
2020-01-02T12:04:31
2020-01-02T12:04:31
210,176,594
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5575027465820312, "alphanum_fraction": 0.5772179365158081, "avg_line_length": 19.288888931274414, "blob_id": "a4f00caa99500780fcd20fdeb4aab01e2e0275e2", "content_id": "7d08ebaba356cbb3aa8afeccc09a4868d4c11355", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 913, "license_type": "permissive", "max_line_length": 78, "num_lines": 45, "path": "/src/run.py", "repo_name": "aklira/weatherstation", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# coding: utf-8\n# info\n__version__ = \"0.1\"\n__author__ = \"Akli R\"\n__date__ = \"04/10/19\"\n\n\nimport mqtt_client as mqttc\nimport tnh as sensor\n\nimport time\nimport sys\n\nimport logging\nlogging.basicConfig()\nlog = logging.getLogger()\nlog.setLevel(logging.DEBUG)\n\nimport traceback\n\nerrCnt = 0\n\nconf_mqtt = '/appli/conf/config_mqtt.yml'\n\ndef main():\n\n while 1:\n try:\n log.info('reading sensors values')\n payload = sensor.read() \n\n log.info('sending payload to remote mqtt broker')\n mqttc.send_to_mqtt_broker(conf_mqtt, str(payload))\n log.info('sleeping for 30 min')\n except:\n errCnt += 1\n tb = traceback.format_exc()\n log.debug(\"!mqtt_client:\\terrCnt: %s; last tb: %s\" % (errCnt, tb))\n finally:\n time.sleep(1800)\n\n# script entry point\nif __name__ == '__main__':\n main()\n" }, { "alpha_fraction": 0.6175040602684021, "alphanum_fraction": 0.6499189734458923, "avg_line_length": 16.16666603088379, "blob_id": "65208761e86c6ee4a2f9ace3a7ff71067b3e3e47", "content_id": "013cefee204f504c8cbcfa81d81f21dea5b06e4f", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 617, "license_type": "permissive", "max_line_length": 40, "num_lines": 36, "path": "/src/rainfall.py", "repo_name": "aklira/weatherstation", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# coding: utf-8\n# info\n__version__ = \"0.1\"\n__author__ = \"Akli R\"\n__date__ = \"04/10/19\"\n\nfrom gpiozero import Button\nimport time\n\nBUCKET_SIZE = 0.2794\nrain_count = 0\nrain_interval = 5\n\ndef bucket_tipped():\n global rain_count\n rain_count += 1\n\ndef reset_rainfall():\n global rain_count\n rain_count = 0\n\ndef calculate_rainfall():\n global rain_count\n rainfall = rain_count * BUCKET_SIZE\n \n return rainfall\n\n\nrain_sensor = Button(6)\nrain_sensor.when_pressed = bucket_tipped\n\nwhile True:\n rain_count = 0\n time.sleep(rain_interval)\n print(calculate_rainfall(), \"mm\")" }, { "alpha_fraction": 0.8333333134651184, "alphanum_fraction": 0.8333333134651184, "avg_line_length": 44, "blob_id": "4b8025cc053339eef5e22e0c458cb12c69cae9cc", "content_id": "e0df9e43b7a1845880ac0d6ff6942a10614edf6b", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 90, "license_type": "permissive", "max_line_length": 72, "num_lines": 2, "path": "/README.md", "repo_name": "aklira/weatherstation", "src_encoding": "UTF-8", "text": "# weatherstation\nweather station temp and humidity, wind speed and direction and rainfall\n" }, { "alpha_fraction": 0.591500461101532, "alphanum_fraction": 0.6036426424980164, "avg_line_length": 24.086956024169922, "blob_id": "c547c1073267926d302e666d745b2b1cd52e4a85", "content_id": "40013c9bb45cd5191c71e6a86c532e5dd2954952", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1153, "license_type": "permissive", "max_line_length": 83, "num_lines": 46, 
"path": "/src/tnh.py", "repo_name": "aklira/weatherstation", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# coding: utf-8\n# info\n__version__ = \"0.1\"\n__author__ = \"Akli R\"\n__date__ = \"04/10/19\"\n\nimport time \nimport Adafruit_DHT\n\nimport logging\nlogging.basicConfig()\nlog = logging.getLogger()\nlog.setLevel(logging.DEBUG)\n\nsensor = Adafruit_DHT.DHT11\npin = 4\n\npayload = {}\n\ndef read():\n humidity, temperature = Adafruit_DHT.read_retry(sensor, pin)\n\n try:\n if humidity is not None and temperature is not None:\n payload = {\n 'timestamp': str(time.time()),\n 'temperature': str(temperature),\n 'humidity': str(humidity)\n }\n except RuntimeError as e:\n # Reading doesn't always work! Just print error and we'll try again\n log.error(\"Reading from DHT failure: \" + e.args) \n return payload\n\n'''\nwhile True:\n try:\n if humidity is not None and temperature is not None:\n print(\"Temp: {:.1f} *C \\t Humidity: {}%\".format(temperature, humidity))\n except RuntimeError as e:\n # Reading doesn't always work! Just print error and we'll try again\n print(\"Reading from DHT failure: \", e.args)\n \n time.sleep(5)\n'''" }, { "alpha_fraction": 0.6034482717514038, "alphanum_fraction": 0.6413792967796326, "avg_line_length": 20.75, "blob_id": "97aaa6d0f27898bbb500419a7d3abbd0ca4dbb25", "content_id": "2a797087e6c1d4519072f7258c8f41208e477363", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 870, "license_type": "permissive", "max_line_length": 57, "num_lines": 40, "path": "/src/wind_speed.py", "repo_name": "aklira/weatherstation", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# coding: utf-8\n# info\n__version__ = \"0.1\"\n__author__ = \"Akli R\"\n__date__ = \"04/10/19\"\n\nfrom gpiozero import Button\nimport math\nimport time\n\nwind_count = 0\nradius_cm = 9.0\nwind_interval = 5\nCM_IN_A_KM = 100000.0\nSECS_IN_AN_HOUR = 3600\nADJUSTMENT = 1.18\n\ndef spin():\n global wind_count\n wind_count += 1\n #print(\"spin\" + str(wind_count))\n\ndef calculate_speed(time_sec):\n global wind_count\n circumference_cm = (2 * math.pi) * radius_cm\n rotations = wind_count / 2.0\n dist_km = (circumference_cm * rotations) / CM_IN_A_KM\n km_per_sec = dist_km / time_sec\n km_per_hour = km_per_sec * SECS_IN_AN_HOUR\n \n return km_per_hour * ADJUSTMENT\n\nwind_speed_sensor = Button(5)\nwind_speed_sensor.when_pressed = spin\n\nwhile True:\n wind_count = 0\n time.sleep(wind_interval)\n print(calculate_speed(wind_interval), \"km/h\")\n" } ]
5
pro-D-coder/DSA-With-Python
https://github.com/pro-D-coder/DSA-With-Python
62ce62e14c2330287c1b17a6f5471f45036cb9f7
72f6ac85c8ce81e72b3e20df879c1567190a1c13
605ebf6ab91c70f19b7c947c2c47c40fa60e77e8
refs/heads/main
2023-09-04T11:51:44.403360
2021-09-25T13:26:35
2021-09-25T13:26:35
410,188,962
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5837563276290894, "alphanum_fraction": 0.5972927212715149, "avg_line_length": 24.69565200805664, "blob_id": "cc91a60ba75d920d791cb034fbe267ce8c9c2d44", "content_id": "887a20cfc2f205f257fb4b6c00b07455724f75e9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 591, "license_type": "no_license", "max_line_length": 74, "num_lines": 23, "path": "/Exercises/Ex4.py", "repo_name": "pro-D-coder/DSA-With-Python", "src_encoding": "UTF-8", "text": "'''\nWrite a short Python function that takes a positive integer n and returns\nthe sum of the squares of all the positive integers smaller than n.\n'''\n\n\ndef small_sum(number):\n if number < 0:\n raise ValueError(str(\"Value Must Be Positive\"))\n sum = 0\n counter = number-1\n while counter > 0:\n sum = sum + (counter*counter)\n counter -= 1\n return sum\n \"\"\"OR\n sum([intx*intx for intx in range(int(input(\"Enter Number: \")),2,-1)]) \n \"\"\"\n\n\nif __name__ == '__main__':\n num = int(input(\"Enter A Number: \"))\n print(\"Sum = {0}\".format(small_sum(num)))\n" }, { "alpha_fraction": 0.527179479598999, "alphanum_fraction": 0.5333333611488342, "avg_line_length": 24.657894134521484, "blob_id": "e6d305b013d8ec38d5c3e5bd8edb03e536c60543", "content_id": "6d21fba4d20b5c24e97e1c5572a2b6ea198ef646", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 975, "license_type": "no_license", "max_line_length": 73, "num_lines": 38, "path": "/Exercises/Ex3.py", "repo_name": "pro-D-coder/DSA-With-Python", "src_encoding": "UTF-8", "text": "'''\nWrite a short Python function, minmax(data), that takes a sequence of\none or more numbers, and returns the smallest and largest numbers, in the\nform of a tuple of length two. 
Do not use the built-in functions min or\nmax in implementing your solution\n'''\n\n\ndef minmax(data):\n    min = data[0]\n    max = data[0]\n    if len(data) == 1:\n        return (data[0], data[0])\n    for i, j in enumerate(data):\n        if(i == 0):\n            continue\n        if(j >= max):\n            max = j\n        if(j <= min):\n            min = j\n\n    return (min, max)\n\n\nif __name__ == \"__main__\":\n    try:\n        input_seq = list()\n        print(\"Press q for stopping the input\")\n        while(True):\n            value = input(\"Enter Number: \")\n            if value != 'q':\n                input_seq.append(int(value))\n            else:\n                break\n        print(minmax(input_seq))\n    except(ValueError):\n        print(\"Passing Value to MinMax\")\n        print(minmax(input_seq))\n" }, { "alpha_fraction": 0.5876068472862244, "alphanum_fraction": 0.5897436141967773, "avg_line_length": 23.63157844543457, "blob_id": "1185ee7a55775cf7ff3b01e780b6c515381ca2fe", "content_id": "a19e0f99cc21c2c80b93043eb95639b9dbdbecd8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 468, "license_type": "no_license", "max_line_length": 73, "num_lines": 19, "path": "/Exercises/Ex1.py", "repo_name": "pro-D-coder/DSA-With-Python", "src_encoding": "UTF-8", "text": "'''\nWrite a short Python function, is_multiple(n, m), that takes two integer\nvalues and returns True if n is a multiple of m, that is, n = mi for some\ninteger i, and False otherwise\n'''\n\n\ndef is_multiple(n, m):\n    for i in range(1, n + 1):\n        product = i * m\n        if(product == n):\n            return True\n    return False\n\n\nif __name__ == \"__main__\":\n    n = int(input(\"Enter Number: \"))\n    m = int(input(\"Enter Another Number: \"))\n    print(is_multiple(n, m))\n" }, { "alpha_fraction": 0.6224066615104675, "alphanum_fraction": 0.634854793548584, "avg_line_length": 25.77777862548828, "blob_id": "313b5f18c5084bf2bf7187d8e601a4ad3df08388", "content_id": "a4e1aa0aee9a31c242ee4c80c5b9e8b92b7e2566", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 482, "license_type": "no_license", "max_line_length": 74, "num_lines": 18, "path": "/Exercises/Ex2.py", "repo_name": "pro-D-coder/DSA-With-Python", "src_encoding": "UTF-8", "text": "'''\nWrite a short Python function, is_even(k), that takes an integer value and\nreturns True if k is even, and False otherwise. 
However, your function\ncannot use the multiplication, modulo, or division operators.\n'''\n\n\ndef is_even(number):\n unit_digit = ['0', '2', '4', '6', '8']\n last_digit = number[len(number) - 1]\n if last_digit in unit_digit:\n return True\n return False\n\n\nif __name__ == '__main__':\n num = input('Enter A Number: ')\n print(is_even(num))\n" }, { "alpha_fraction": 0.7115384340286255, "alphanum_fraction": 0.7115384340286255, "avg_line_length": 47, "blob_id": "15448387010a10bd998dadda3dcd73b8337ded22", "content_id": "2c85e1d0aacc0812246ac5b29a265e6ddc9df156", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 106, "license_type": "no_license", "max_line_length": 76, "num_lines": 2, "path": "/README.md", "repo_name": "pro-D-coder/DSA-With-Python", "src_encoding": "UTF-8", "text": "# DSA With Python\r\n ## This Repository Is Created For Data Structure And Algorithm With Python.\r\n \r\n \r\n" }, { "alpha_fraction": 0.5508317947387695, "alphanum_fraction": 0.5711644887924194, "avg_line_length": 24.761905670166016, "blob_id": "1e0611977ca1b479f2f741adffc18910ad9c72ef", "content_id": "f18a3de8415274fade9c1da19a7dbccacac2bc66", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 541, "license_type": "no_license", "max_line_length": 79, "num_lines": 21, "path": "/Exercises/Ex5.py", "repo_name": "pro-D-coder/DSA-With-Python", "src_encoding": "UTF-8", "text": "'''\nWrite a short Python function that takes a positive integer n and returns\nthe sum of the squares of all the odd positive integers smaller than n\n'''\n\n\ndef small_odd_sum(number):\n sum = 0\n if(number < 0):\n raise ValueError(\"Must be Positive\")\n for i in range(number-1, 2, -1):\n if i % 2 != 0:\n sum += i*i\n return sum\n '''OR\n sum([i*i for i in range(int(input(\"Enter Number: \")),2,-1) if i % 2 != 0])\n '''\n\n\nif __name__ == '__main__':\n print(small_odd_sum(int(input(\"Enter A Number: \"))))\n" } ]
6
CodeupClassroom/hopper-python-exercises
https://github.com/CodeupClassroom/hopper-python-exercises
ee20886234fc3ae3c30d6002fa663f1aa5710ecb
8ef5f78d0cedc185091d0b12738742b2fbf84982
b07489c5913db7e89684484eda181b2848713696
refs/heads/main
2023-08-16T14:01:07.891403
2021-10-08T18:38:24
2021-10-08T18:38:24
412,234,097
4
0
null
null
null
null
null
[ { "alpha_fraction": 0.7118644118309021, "alphanum_fraction": 0.7118644118309021, "avg_line_length": 11, "blob_id": "fd6ce6133ee60158f1ca1f1bd24ee223f446cc93", "content_id": "ff30328e9bccbcfaaa079671c711955ee0dce04d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 59, "license_type": "no_license", "max_line_length": 26, "num_lines": 5, "path": "/hello.py", "repo_name": "CodeupClassroom/hopper-python-exercises", "src_encoding": "UTF-8", "text": "print(\"Hello\")\n\ngreeting = \"Hi, Everybody\"\n\nprint(greeting)" }, { "alpha_fraction": 0.6567796468734741, "alphanum_fraction": 0.6652542352676392, "avg_line_length": 17.230770111083984, "blob_id": "c3e36bd41fa01e3e2e00b6a305ef7e45e310ac1c", "content_id": "67af41ee5e4d189b26ccc5796ccd9df7e245080c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 236, "license_type": "no_license", "max_line_length": 39, "num_lines": 13, "path": "/sample_module2.py", "repo_name": "CodeupClassroom/hopper-python-exercises", "src_encoding": "UTF-8", "text": "from sample_module import bootcamp_name\n\nnew_name = bootcamp_name * 2\nbootcamp_name = new_name\n\ndef double_string(string):\n return string * 2\n\ndef main():\n print(double_string(bootcamp_name))\n\nif __name__ == '__main__':\n main()" }, { "alpha_fraction": 0.6638298034667969, "alphanum_fraction": 0.6765957474708557, "avg_line_length": 15.857142448425293, "blob_id": "33d3a91cb0ecc5d18b8ed92bc6a7deeb293a1aaa", "content_id": "b8248794c0387a782c56d022b20f01a5a239e7ba", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 235, "license_type": "no_license", "max_line_length": 58, "num_lines": 14, "path": "/sample_module.py", "repo_name": "CodeupClassroom/hopper-python-exercises", "src_encoding": "UTF-8", "text": "import math\n\ndef double_the_root(num):\n return math.sqrt(num) * 2\n\ndef add_two(num):\n return num + 2\n\ndef subtract_two(num):\n return num - 2\n\nbootcamp_name = 'Codeup'\n\nsuper_secret_password_never_share_this_ever = 'passw0rd'" } ]
3
daniiarzzzzz/Python04
https://github.com/daniiarzzzzz/Python04
79c8f789821261ba57dce71ed7b11602ace20394
6fd3bd49e9a8e7e1e139a26596ba1bf35391122c
2e30c023c1bdadf9ebe5d52ecf22fd456aa21c71
refs/heads/master
2023-03-26T10:12:46.711733
2021-03-22T04:43:34
2021-03-22T04:43:34
341,426,079
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6223564743995667, "alphanum_fraction": 0.6283987760543823, "avg_line_length": 12.791666984558105, "blob_id": "a3fa3185accb8023657455e697f5d1fb7d1e6817", "content_id": "7ad1943bcf583f7ce504b6790b70a25bec455e81", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 331, "license_type": "no_license", "max_line_length": 32, "num_lines": 24, "path": "/lesson1/lesson1.4.py", "repo_name": "daniiarzzzzz/Python04", "src_encoding": "UTF-8", "text": "class Dog:\n\n def make_noise(self, times):\n return \"bark\" * times\n\n\nclass Cat:\n\n def make_noise(self, times):\n return \"meow\" * times\n\n\ndef noise(noise_maker, times):\n print(noise_maker(times))\n\n\ndef create_class(cls):\n return cls()\n\n\nanimal = Cat()\nnoise(animal.make_noise, 3)\n\nanimal2 = create_class(Cat)\n" }, { "alpha_fraction": 0.577937662601471, "alphanum_fraction": 0.577937662601471, "avg_line_length": 18.85714340209961, "blob_id": "0a2f50853eb5562ad61b75586d0fae48d6eae87d", "content_id": "fdf57db8b685707f6d4b32b398358c0439f1df66", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 417, "license_type": "no_license", "max_line_length": 51, "num_lines": 21, "path": "/lesson5/lesson5.2.py", "repo_name": "daniiarzzzzz/Python04", "src_encoding": "UTF-8", "text": "class Node:\n\n def __init__(self, name: str, parent):\n self.name = name\n self.parent = parent # object of type Node\n\n def __str__(self):\n return self.name + \", \" + str(self.parent)\n\n\nch = Node(\"Chyngyz Han\", None)\ndjchi = Node(\"Jychi\", ch)\nugd = Node(\"Ugedei\", ch)\nchgt = Node(\"Chagatai\", ch)\ntolui = Node(\"Toloi\", ch)\n\nuli = Node(\"Uluk\", djchi)\nair = Node(\"Airas\", chgt)\n\nprint(uli)\nprint(air)\n" }, { "alpha_fraction": 0.6152716875076294, "alphanum_fraction": 0.6505139470100403, "avg_line_length": 26.239999771118164, "blob_id": "e7f918c7be8183487696cec4355f6699f4f9c568", "content_id": "e0b03d70d5113f4c986735492571869e13b60173", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 681, "license_type": "no_license", "max_line_length": 97, "num_lines": 25, "path": "/lesson1/lesson1.py", "repo_name": "daniiarzzzzz/Python04", "src_encoding": "UTF-8", "text": "class Human:\n class_name = 'Homo sapiens'\n\n def __init__(self, name, gender, race, height, weight):\n self.name = name\n self.gender = gender\n self.race = race\n self.height = height\n self.weight = weight\n\n\nhuman1 = Human(height=20, name='Daniiar', weight=61, race='Mongol', gender='male')\nhuman2 = Human(height=17, name='Aman', weight=66, race='Mongol', gender='male')\n\n\n# print(human1.class_name, human1.name, human1.height, human1.weight, human1.race, human1.gender)\n# print(human1.class_name, human2.name, human2.height, human2.weight, human2.race, human2.gender)\n\n\ndef infinite_loop():\n while 1:\n print(\"1\")\n\n\n# infinite_loop()\n" }, { "alpha_fraction": 0.5235602259635925, "alphanum_fraction": 0.5863874554634094, "avg_line_length": 13.692307472229004, "blob_id": "fc6ee43d68fd0f05744f560ad9c3fca99dd02ffe", "content_id": "c5114744bba3a1cd068f8c12e832a51a779448f6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 191, "license_type": "no_license", "max_line_length": 35, "num_lines": 13, "path": "/lesson5/lesson5.5.py", "repo_name": "daniiarzzzzz/Python04", "src_encoding": "UTF-8", "text": "from random import randint\n\n\ndef 
random_list() -> list:\n arr = []\n for i in range(1, 10000000, 5):\n arr.append(i+randint(1, 4))\n\n return arr\n\n\nl = random_list()\nprint(len(l))\n" }, { "alpha_fraction": 0.6075268983840942, "alphanum_fraction": 0.6344085931777954, "avg_line_length": 10.625, "blob_id": "ee679289e1ebea1046a66c543165415534e284c2", "content_id": "8f26921bff9abbfc507ee1794cc01b6f4889eb37", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 186, "license_type": "no_license", "max_line_length": 31, "num_lines": 16, "path": "/lesson1/lesson1.3.py", "repo_name": "daniiarzzzzz/Python04", "src_encoding": "UTF-8", "text": "name = \"Daniiar\"\nage = 21\n\n\ndef function(func): # Callback\n a = func()\n return a\n\n\ndef function2():\n return \"LOl\"\n\n\na = function\nprint(a(function2))\nprint(function(function2))\n" }, { "alpha_fraction": 0.5313432812690735, "alphanum_fraction": 0.5537313222885132, "avg_line_length": 18.705883026123047, "blob_id": "32fdf28f1af0ee47a12ed23c668d75317447c33e", "content_id": "023058cd078c49b2fa1490b3e2885f5258b061cd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 670, "license_type": "no_license", "max_line_length": 51, "num_lines": 34, "path": "/lesson1/lesson1.2.py", "repo_name": "daniiarzzzzz/Python04", "src_encoding": "UTF-8", "text": "class Bird:\n\n def __init__(self, type, size, color):\n self.type = type\n self.size = size\n self.color = color\n\n def fly(self):\n if self.size > 100:\n print(\"Can't fly\")\n else:\n print(\"Can fly\")\n\n def give_eggs(self):\n return f\"Eggs {self.type}\"\n\n\nfor i in range(1, 1):\n bird_list = []\n type = input()\n size = input()\n color = input()\n Bird(type, size, color)\n bird_list.append(Bird)\n\nbird = Bird(type='pigeon', size=20, color='white')\nbird.fly()\neggs = bird.give_eggs()\n\nbird2 = Bird(type='straus', size=200, color='dark')\nbird2.fly()\neggs2 = bird2.give_eggs()\n\nprint(eggs, eggs2)\n" }, { "alpha_fraction": 0.7118644118309021, "alphanum_fraction": 0.7457627058029175, "avg_line_length": 15.571428298950195, "blob_id": "7c91da07677f9af0dd0b9d32acb1680cdae6bbe2", "content_id": "c6e095f1a1cae8eebb5962628b3e59a77c041bee", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 118, "license_type": "no_license", "max_line_length": 29, "num_lines": 7, "path": "/lesson3/lesson3.py", "repo_name": "daniiarzzzzz/Python04", "src_encoding": "UTF-8", "text": "from lesson1.lesson1 import *\n\n# l.Human()\nprint(human1)\nprint(human2)\nprint(infinite_loop)\nprint(Human.class_name)\n\n\n" }, { "alpha_fraction": 0.6276252269744873, "alphanum_fraction": 0.6340872645378113, "avg_line_length": 17.650793075561523, "blob_id": "2bb587d06aa9b52062b3b72262abce4b348cafc8", "content_id": "517fc3a3a2fea44c65d2df4a9c671e68fdab5396", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1238, "license_type": "no_license", "max_line_length": 50, "num_lines": 63, "path": "/lesson7/hw.py", "repo_name": "daniiarzzzzz/Python04", "src_encoding": "UTF-8", "text": "class ORM:\r\n\r\n @classmethod\r\n def create_table(cls, fields: list):\r\n print(cls.__name__.lower())\r\n raise NotImplemented\r\n\r\n def update(self):\r\n raise NotImplemented\r\n\r\n def delete(self):\r\n raise NotImplemented\r\n\r\n @classmethod\r\n def m2m_relationship(cls, other_cls):\r\n raise NotImplemented\r\n\r\n @classmethod\r\n def foreign_key(cls, other_cls):\r\n 
raise NotImplemented\r\n\r\n    @classmethod\r\n    def get_data_from_m2m(cls, rel):\r\n        raise NotImplemented\r\n\r\n    @classmethod\r\n    def get_data_from_foreign(cls, rel):\r\n        raise NotImplemented\r\n\r\n    def set_data_to_m2m(self, rel):\r\n        raise NotImplemented\r\n\r\n    def set_data_to_foreign(self, rel):\r\n        raise NotImplemented\r\n\r\n\r\nclass Student(ORM):\r\n    pass\r\n\r\n\r\nclass Course(ORM):\r\n    pass\r\n\r\n\r\nclass Laptop(ORM):\r\n    pass\r\n\r\n\r\nStudent.create_table()\r\nCourse.create_table()\r\n\r\ncourse = Course(1, \"sdasd\")\r\nstudent = Student(1, \" asdf,asd,f a,sdf,a,sdf,as\")\r\nstudent.update()\r\nstudent.delete()\r\nStudent.m2m_relationship(Course)\r\nStudent.foreign_key(Laptop)\r\n\r\nStudent.get_data_from_m2m(course)\r\nStudent.get_data_from_foreign(course)\r\n\r\nstudent.set_data_to_foreign(course)\r\nstudent.set_data_to_m2m(course)\r\n" }, { "alpha_fraction": 0.5641527771949768, "alphanum_fraction": 0.5866796970367432, "avg_line_length": 25.86842155456543, "blob_id": "686f5cb97953995da8b6e7d98ed5c3b7be44f921", "content_id": "8b4a2643dc1816d27fb77e42d737c88459908aec", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1021, "license_type": "no_license", "max_line_length": 61, "num_lines": 38, "path": "/lesson1/hw.py", "repo_name": "daniiarzzzzz/Python04", "src_encoding": "UTF-8", "text": "class Fraction:\n\n    def __init__(self, num, denum):\n        if denum == 0:\n            raise ValueError(\"Denominator can't be 0\")\n\n        self.num = num\n        self.denum = denum\n\n    def __add__(self, other):\n        num = self.num * other.denum + other.num * self.denum\n        denum = self.denum * other.denum\n        return Fraction(num, denum)\n\n    def __sub__(self, other):\n        num = self.num * other.denum - other.num * self.denum\n        denum = self.denum * other.denum\n        return Fraction(num, denum)\n\n    def multiply(self, other):\n        num = self.num * other.num\n        denum = self.denum * other.denum\n        return Fraction(num, denum)\n\n    def div(self, other):\n        num = self.num * other.denum\n        denum = self.denum * other.num\n        return Fraction(num, denum)\n\n\nfraction1 = Fraction(1, 2)  # 1/2\nfraction2 = Fraction(2, 3)  # 2/3\n# fraction3 = fraction1 - fraction2 + fraction2 - fraction2\n#\nfor i in range(10):\n    fraction2 += fraction1\n\nprint(fraction2.num, \"|\", fraction2.denum)\n" }, { "alpha_fraction": 0.6579925417900085, "alphanum_fraction": 0.6765799522399902, "avg_line_length": 15.8125, "blob_id": "292f5fe3ff1b854b876a27c875ed2209cfaf4097", "content_id": "c9d5345a7dc11bed70e3e710f30e5f5224d564ea", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 269, "license_type": "no_license", "max_line_length": 74, "num_lines": 16, "path": "/lesson3/lesson3.3.py", "repo_name": "daniiarzzzzz/Python04", "src_encoding": "UTF-8", "text": "import bs4\nimport requests\n\nhtml = requests.get(\"https://24.kg/\")\n\n# print(html.content)\n\nbs = bs4.BeautifulSoup(str(html.content, encoding='utf-8'), 'html.parser')\n\nnews_list = bs.find_all('div', class_='one')\n\nfor i in news_list:\n    print(i.text)\n\n\nprint(news_list)\n" }, { "alpha_fraction": 0.6028168797492981, "alphanum_fraction": 0.627464771270752, "avg_line_length": 23.06779670715332, "blob_id": "7a4759543e24168f55933c20febbbd4bdeae4512", "content_id": "f7b69df7e26fa7591dc5e236994459904e4a8a5b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1420, "license_type": "no_license", "max_line_length": 94, "num_lines": 59, "path": "/lesson2/lesson2.py", 
"repo_name": "daniiarzzzzz/Python04", "src_encoding": "UTF-8", "text": "class Animal:\n\n def __init__(self, color, size, brain):\n self.color = color\n self.size = size\n self.brain = brain\n\n def birth(self):\n return \"YA RODYLSYA\"\n\n def live(self):\n return \"YA JIVU\"\n\n def death(self):\n return \"YA UMER\"\n\n\nclass Parrot(Animal):\n\n def __init__(self, color, size, brain, wings, tail, beak):\n super(Parrot, self).__init__(color, size, brain)\n self.wings = wings\n self.tail = tail\n self.beak = beak\n\n def birth(self):\n return \"YA VYLUPILSYA\"\n\n\nclass Kakadu(Parrot):\n\n def __init__(self, color, size, brain, wings, tail, beak, scallop):\n super(Kakadu, self).__init__(color, size, brain, wings, tail, beak)\n self.scallop = scallop\n\n def live(self):\n return \"YA LIVE IN A JUNGLE\"\n\n def death(self):\n return \"HEROIC DEATH\"\n\n\nanimal = Animal(color='white', size=100, brain=5)\nparrot = Parrot(color='black', size=100, brain=5, wings=1000000, tail=10, beak=3)\nkakadu = Kakadu(color='black', size=100, brain=5, wings=1000000, tail=10, beak=3, scallop=312)\n\nprint(animal.color, animal.size, animal.brain)\nprint(parrot.color, parrot.size, parrot.brain, parrot.wings)\nprint(kakadu.birth())\nprint(parrot.birth())\nprint(animal.birth())\nprint()\nprint(kakadu.live())\nprint(parrot.live())\nprint(animal.live())\nprint()\nprint(kakadu.death())\nprint(animal.death())\nprint(parrot.death())\n" }, { "alpha_fraction": 0.5744186043739319, "alphanum_fraction": 0.5744186043739319, "avg_line_length": 13.82758617401123, "blob_id": "5e879b9b1f3edc3ec23d943daa00ff436369fff3", "content_id": "65ae0e4b8d780e36e228f2dc5ed4b5b1a99adcb3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 430, "license_type": "no_license", "max_line_length": 29, "num_lines": 29, "path": "/lesson5/lesson5.py", "repo_name": "daniiarzzzzz/Python04", "src_encoding": "UTF-8", "text": "class Stack:\n\n def __init__(self, arr):\n self.arr = arr\n\n def add(self, elem):\n self.arr.append(elem)\n\n def remove(self):\n self.arr.pop()\n\n def __str__(self):\n return str(self.arr)\n\n\nstack = Stack([])\nstack.add(\"a\")\nstack.add(\"i\")\nstack.add(\"r\")\nstack.add(\"a\")\nstack.add(\"s\")\nprint(stack)\nstack.remove()\nstack.remove()\nprint(stack)\nstack.add(\"A\")\nstack.add(\"S\")\nstack.add(\"$\")\nprint(stack)\n" }, { "alpha_fraction": 0.60550457239151, "alphanum_fraction": 0.6128440499305725, "avg_line_length": 11.674418449401855, "blob_id": "c1631d21ee798fd14b0a043ae6c337a37377bc0b", "content_id": "36efca5b5e67e5b809a34925dd7ce12bca1f84c3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 545, "license_type": "no_license", "max_line_length": 45, "num_lines": 43, "path": "/lesson2/lesson2.2.py", "repo_name": "daniiarzzzzz/Python04", "src_encoding": "UTF-8", "text": "class Tech:\n\n def __init__(self, energy):\n self.energy = energy\n\n\nclass Phone(Tech):\n\n def __init__(self, size):\n self.size = size\n\n\nclass TouchScreenWork:\n\n def touch(self):\n print(\"TouchScreenWork\")\n\n\nclass KeyboardWork:\n\n def press(self):\n print(\"KeyboardWork\")\n\n\nclass Samsung(Phone):\n\n def __init__(self, ):\n pass\n\n\nclass S21(TouchScreenWork, Samsung):\n pass\n\n\nclass KeyboardSamsung(KeyboardWork, Samsung):\n pass\n\n\nk = KeyboardSamsung()\nts = S21()\n\nprint(k.press())\nprint(ts.touch())\n" }, { "alpha_fraction": 0.7155555486679077, "alphanum_fraction": 0.7333333492279053, "avg_line_length": 
23.22222137451172, "blob_id": "43e4566f1781804e705859f580879ac06fee9a42", "content_id": "6d87413ea54b7a8c5112acf71737652d42f22c34", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 225, "license_type": "no_license", "max_line_length": 82, "num_lines": 9, "path": "/lesson7/lesson7.py", "repo_name": "daniiarzzzzz/Python04", "src_encoding": "UTF-8", "text": "import sqlite3\r\n\r\nconnection = sqlite3.connect(\"../lesson6/db.sqlite3\")\r\n\r\ncursor = connection.cursor()\r\ncursor.execute(\"CREATE TABLE course (id INTEGER PRIMARY KEY, name TEXT NOT NULL)\")\r\n\r\ncursor.close()\r\nconnection.close()" }, { "alpha_fraction": 0.49438202381134033, "alphanum_fraction": 0.5224719047546387, "avg_line_length": 14.70588207244873, "blob_id": "12b7605d5c328c17556e8600a5a02f4d1b4c3b57", "content_id": "b4a937d6a9eaf4e73eafb4270bfed31bd79e3d0c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 534, "license_type": "no_license", "max_line_length": 36, "num_lines": 34, "path": "/lesson2/lesson2.3.py", "repo_name": "daniiarzzzzz/Python04", "src_encoding": "UTF-8", "text": "num = 19 == 19\n\n\nclass Age:\n\n def __init__(self, age):\n self.age = age\n\n def __add__(self, other):\n age = self.age + other.age\n return Age(age)\n\n def __sub__(self, other):\n pass\n\n def __getitem__(self, key):\n return self.age\n\n def __eq__(self, other):\n return self.age == other.age\n\n @staticmethod\n def print_value():\n print(\"asdf\")\n\n @property\n def get_age(self):\n return self.age * 10\n\n\nage1 = Age(90)\nage2 = Age(90)\nage3 = age2\nprint(age3.get_age)\n" }, { "alpha_fraction": 0.5047022104263306, "alphanum_fraction": 0.5235109925270081, "avg_line_length": 13.409090995788574, "blob_id": "94b3a2f8a6180acf6f81129d9b362ab3fda49368", "content_id": "dc11d8c5f7b13515eacafc07b5cff827a0e3eb19", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 319, "license_type": "no_license", "max_line_length": 29, "num_lines": 22, "path": "/lesson5/lesson5.1.py", "repo_name": "daniiarzzzzz/Python04", "src_encoding": "UTF-8", "text": "# Data structures\n\nclass Queue:\n\n def __init__(self, arr):\n self.arr = arr\n\n def add(self, elem):\n self.arr.append(elem)\n\n def remove(self):\n self.arr.pop(0)\n\n def __str__(self):\n return str(self.arr)\n\n\nq = Queue([1, 2, 3, 4])\nprint(q)\nq.add(5)\nq.remove()\nprint(f\"q is :{q}\")\n\n\n" }, { "alpha_fraction": 0.7162162065505981, "alphanum_fraction": 0.7162162065505981, "avg_line_length": 13.800000190734863, "blob_id": "ccc41960e327dd016e10c943fefcbee0b2734879", "content_id": "3edd19c31a29b842efc615237f540e1af63012c4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 74, "license_type": "no_license", "max_line_length": 26, "num_lines": 5, "path": "/lesson3/lesson3.2.py", "repo_name": "daniiarzzzzz/Python04", "src_encoding": "UTF-8", "text": "import wikipedia as wiki\n\nwiki.set_lang('ru')\n\nprint(wiki.search('Beki'))\n" } ]
17
BenjaminLaprise/scripts
https://github.com/BenjaminLaprise/scripts
e0c778b5f38e80cd373c11aec27b6183c59ad81d
5d0f33eb00911d1bf2ff66d031809b9de2ce9569
ce284ee7eb21bf071fa66ff8f08bfe4c06ee4669
refs/heads/master
2020-04-08T20:33:40.411008
2019-07-28T15:47:05
2019-07-28T15:47:05
121,554,107
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7089946866035461, "alphanum_fraction": 0.7195767164230347, "avg_line_length": 26, "blob_id": "0d29f05cf6ccc170917761afc1b9e9ef8828cceb", "content_id": "e22b88ba2782be988ea57ee49005acd1c07d7e48", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 189, "license_type": "no_license", "max_line_length": 59, "num_lines": 7, "path": "/random_vertical_wp.sh", "repo_name": "BenjaminLaprise/scripts", "src_encoding": "UTF-8", "text": "#!/usr/bin/env bash\n\nimage_directory=\"$HOME/Images/vertical_wallpapers\"\n\nwallpaper=$(ls $image_directory | sort -R | head -n 1)\n\nnitrogen --set-scaled --head=$1 $image_directory/$wallpaper\n" }, { "alpha_fraction": 0.5714285969734192, "alphanum_fraction": 0.6363636255264282, "avg_line_length": 24.66666603088379, "blob_id": "4e59560aebee757347bc56f45c12e45a2544f953", "content_id": "c6726a2f6fb5f394b8b930734df7bcca55096327", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 77, "license_type": "no_license", "max_line_length": 55, "num_lines": 3, "path": "/get_vertical_monitors.sh", "repo_name": "BenjaminLaprise/scripts", "src_encoding": "UTF-8", "text": "#!/usr/bin/env bash\n\nxrandr --listmonitors | grep \" 1080\" | cut -c 2 | xargs\n" }, { "alpha_fraction": 0.5814648866653442, "alphanum_fraction": 0.5874439477920532, "avg_line_length": 31.370967864990234, "blob_id": "743d5c81e44444c0fc780e606231276cdad78a96", "content_id": "5f382656a3d2475630d437f17709a8c5a43c20ff", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2007, "license_type": "no_license", "max_line_length": 102, "num_lines": 62, "path": "/auto_xrandr_daemon.py", "repo_name": "BenjaminLaprise/scripts", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\nimport sys\nimport time\nimport subprocess\n\nfrom daemon import Daemon\n\n\ndef get(cmd): return subprocess.check_output(cmd).decode(\"utf-8\")\n\n\ndef count_screens(monitors): return monitors.count(\" connected \")\n\n\ndef run_command(cmd): subprocess.Popen([\"/bin/bash\", \"-c\", cmd])\n\ndef get_vertical_monitors():\n output = subprocess.run([\"bash\", \"-c\", \"~/scripts/get_vertical_monitors.sh\"], capture_output=True)\n output = output.stdout.decode('utf-8')\n return output.strip().split()\n\ndef set_wallpapers(monitor_count):\n vertical_monitors = get_vertical_monitors()\n for monitor in range(monitor_count):\n if str(monitor) in vertical_monitors:\n run_command(\"~/scripts/random_vertical_wp.sh %s\" % monitor)\n else:\n run_command(\"~/scripts/random_wp.sh %s\" % monitor)\n\nclass AutoXRandrDaemon(Daemon):\n def run(self):\n print('Auto XRandr Daemon started')\n monitor_count = None\n while True:\n time.sleep(1)\n new_monitor_count = count_screens(get([\"xrandr\"]))\n if new_monitor_count != monitor_count:\n print('Monitor count: %s' % new_monitor_count)\n run_command(\"~/.screenlayout/default_%s.sh\" % new_monitor_count)\n run_command(\"~/.config/polybar/launch.sh\")\n set_wallpapers(new_monitor_count)\n monitor_count = new_monitor_count\n\nif __name__ == \"__main__\":\n daemon = AutoXRandrDaemon('/tmp/auto-xrandr-daemon.pid')\n if len(sys.argv) == 2:\n if 'start' == sys.argv[1]:\n daemon.start()\n elif 'stop' == sys.argv[1]:\n daemon.stop()\n run_command('killall polybar')\n elif 'run' == sys.argv[1]:\n daemon.restart()\n elif 'restart' == sys.argv[1]:\n daemon.restart()\n else:\n print(\"Unknown command\")\n sys.exit(2)\n sys.exit(0)\n else:\n 
print(\"usage: %s start|stop|restart\" % sys.argv[0])\n sys.exit(2)\n" }, { "alpha_fraction": 0.7414966225624084, "alphanum_fraction": 0.7653061151504517, "avg_line_length": 41, "blob_id": "d24e8d2e731a56a2ccf82012639db1e17cf9fb2a", "content_id": "426749c45bc80ee2632a939c1af867cb4b8be824", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 294, "license_type": "no_license", "max_line_length": 105, "num_lines": 7, "path": "/lock.sh", "repo_name": "BenjaminLaprise/scripts", "src_encoding": "UTF-8", "text": "#!/usr/bin/env bash\n\nscrot /tmp/screen_locked.png\nconvert /tmp/screen_locked.png -scale 10% -scale 1000% /tmp/blured_screen_locked.png\nplayerctl pause\n~/scripts/auto_xrandr_daemon.py stop\ni3lock -i /tmp/blured_screen_locked.png --ignore-empty-password -n; ~/scripts/auto_xrandr_daemon.py start\n" }, { "alpha_fraction": 0.3732660710811615, "alphanum_fraction": 0.3972257375717163, "avg_line_length": 21.657142639160156, "blob_id": "39ac0a4bd90674d582c44d17cb700da2bfd81364", "content_id": "fffbf1847099ffbe1ef7eb55f289aee2b24aec02", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 793, "license_type": "no_license", "max_line_length": 79, "num_lines": 35, "path": "/web-search.bak", "repo_name": "BenjaminLaprise/scripts", "src_encoding": "UTF-8", "text": "#!/usr/bin/env bash\n\n# -----------------------------------------------------------------------------\n# Info:\n# author: Miroslav Vidovic\n# file: web-search.sh\n# created: 24.02.2017.-08:59:54\n# revision: ---\n# version: 1.0\n# -----------------------------------------------------------------------------\n# Requirements:\n# rofi\n# Description:\n# Use rofi to search the web.\n# Usage:\n# web-search.sh\n# -----------------------------------------------------------------------------\n# Script:\n\nmain() {\n # Pass the list to rofi\n platform=\"https://www.google.com/search?q=\"\n\n query=$( (echo ) | rofi -dmenu -l 0 -location 0 -p \"Query > \" )\n if [[ -n \"$query\" ]]; then\n url=$platform$query\n xdg-open \"$url\"\n else\n rofi -show -e \"No query provided.\"\n fi\n}\n\nmain\n\nexit 0\n" }, { "alpha_fraction": 0.5333333611488342, "alphanum_fraction": 0.5444444417953491, "avg_line_length": 9, "blob_id": "aa2fbd66ee02f34cdbfd516790fb23a029fe75b3", "content_id": "cf1a2db3b269878a5e5e58701010033def50c495", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 90, "license_type": "no_license", "max_line_length": 45, "num_lines": 9, "path": "/cheat.sh", "repo_name": "BenjaminLaprise/scripts", "src_encoding": "UTF-8", "text": "#!/usr/bin/env sh\n\nset -u\n\ntopic=$1\n\nshift\n\ncurl cht.sh/$topic/$(echo $@ | sed \"s/ /+/g\")\n" } ]
6
kaptoxa/webhook_aiogram_without_nginx_bot
https://github.com/kaptoxa/webhook_aiogram_without_nginx_bot
c8c8784f3234311008596b20a2d2c6bec8f01ed0
0bcdf17190ab9acb4e6b9bf202fd589b827664fa
152cf6721c30817bf456771179ea9c8c4b288aec
refs/heads/master
2023-02-18T10:32:20.226767
2021-05-24T03:27:37
2021-05-24T03:27:37
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5833333134651184, "alphanum_fraction": 0.6512345671653748, "avg_line_length": 20.600000381469727, "blob_id": "bca58d28718841b9a164f0a5bb2210fb1c2acc2a", "content_id": "25e3159329dbb1f31ffc0bf324fd4f7967d1c519", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 348, "license_type": "no_license", "max_line_length": 42, "num_lines": 15, "path": "/config.py", "repo_name": "kaptoxa/webhook_aiogram_without_nginx_bot", "src_encoding": "UTF-8", "text": "# API_TOKEN тут надо добавить токен бота\n\nURL=\"212.8.247.152\"\nPORT=\"8443\"\n\n\nWEBHOOK_URL_BASE = f'https://{URL}:{PORT}'\nWEBHOOK_PATH = f'/{API_TOKEN}/'\nWEBHOOK_URL_PATH = f'/{API_TOKEN}/'\n\nWEBHOOK_SSL_CERT = './webhook_cert.pem'\nWEBHOOK_SSL_PRIV = './webhook_pkey.pem'\n\nWEBAPP_HOST = f'{URL}' # 127.0.0.1'\nWEBAPP_PORT = '8443'\n" }, { "alpha_fraction": 0.7257142663002014, "alphanum_fraction": 0.7471428513526917, "avg_line_length": 22.33333396911621, "blob_id": "c9199757776ea876f7915f1a86dc5c43c2e61df0", "content_id": "88f0ba7661616ad6a0eff997c8c4a7e84e9f7ffd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 700, "license_type": "no_license", "max_line_length": 96, "num_lines": 30, "path": "/README.md", "repo_name": "kaptoxa/webhook_aiogram_without_nginx_bot", "src_encoding": "UTF-8", "text": "## Telegram bot via webhook on aiogram without nginx or apache\n\n\n\n1). Generate SSL keys:\n\nsudo apt-get install openssl\n\nopenssl genrsa -out webhook_pkey.pem 2048\n\nopenssl req -new -x509 -days 3650 -key webhook_pkey.pem -out webhook_cert.pem\n\nACHTUNG! To create a certificate we have to point domain name or ip address\n\n2). Before start webhook:\n\ncontext = ssl.SSLContext()\n\ncontext.load_cert_chain(WEBHOOK_SSL_CERT, WEBHOOK_SSL_PRIV)\n\n3). 
Pass the SSL context as an argument:\n\nstart_webhook(..., host=WEBAPP_HOST, port=WEBAPP_PORT, ssl_context=context)\n\n\nFIN!\n\n4) You may need to upload the certificate to the Telegram server\n\ncurl -F \"url=https://YOU_MEGA_URL\" -F \"[email protected]\" https://api.telegram.org/botYOU_TOKEN/setWebhook\n" }, { "alpha_fraction": 0.690731406211853, "alphanum_fraction": 0.690731406211853, "avg_line_length": 26.824562072753906, "blob_id": "e821e49f53d29a73139ce2e9a1f27fbe1b037304", "content_id": "d8e664e78424ff3717140a937b718ba697323333", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3172, "license_type": "no_license", "max_line_length": 107, "num_lines": 114, "path": "/webhook_aiogram.py", "repo_name": "kaptoxa/webhook_aiogram_without_nginx_bot", "src_encoding": "UTF-8", "text": "import logging\n\nimport ssl\n\nfrom aiogram import Bot, types\nfrom aiogram.contrib.middlewares.logging import LoggingMiddleware\nfrom aiogram.dispatcher import Dispatcher, FSMContext\nfrom aiogram.dispatcher.webhook import SendMessage\nfrom aiogram.contrib.fsm_storage.memory import MemoryStorage\nfrom aiogram.dispatcher.filters.state import State, StatesGroup\nfrom aiogram.utils.executor import start_webhook\n\nfrom config import *\n\nlogging.basicConfig(level=logging.INFO)\n\nbot = Bot(token=API_TOKEN)\nstorage = MemoryStorage()\ndp = Dispatcher(bot, storage=storage)\ndp.middleware.setup(LoggingMiddleware())\n\n\nclass Proba(StatesGroup):\n    START = State()\n    SECOND = State()\n    FIN = State()\n\n\[email protected]_handler(commands='start')\nasync def cmd_start(message: types.Message):\n    logging.info('start command!')\n    await Proba.START.set()\n    await message.reply(\"Hi there! Yahho!\")\n#    link_info = await get_start_link(x)  # result: 'https://t.me/MyBot?start=foo'\n#    print(link_info, x)\n#    print(message.text)\n#    await bot.send_message(message.from_user.id, phrases[state]['text'], reply_markup=get_keyboard(state))\n\n\[email protected]_handler(state=Proba.START)\nasync def echo(message: types.Message, state: FSMContext):\n    cur_state = await state.get_state()\n    logging.info(f' cur state = {cur_state}')\n\n    async with state.proxy() as data:\n        logging.info(f'message! {message.text}')\n        data['saved'] = message.text\n\n    await Proba.SECOND.set()\n\n    return SendMessage(message.chat.id, message.text)\n\n\[email protected]_handler(state=Proba.SECOND)\nasync def echo(message: types.Message, state: FSMContext):\n    async with state.proxy() as data:\n        prev = data['saved']\n        logging.info(f'message! 
{message.text}')\n data['saved'] = message.text\n\n await Proba.FIN.set()\n\n return SendMessage(message.chat.id, prev)\n\n\[email protected]_handler(state=Proba.FIN)\nasync def echo(message: types.Message, state: FSMContext):\n cur_state = await state.get_state()\n logging.info(f' cur state = {cur_state}')\n if cur_state is None:\n return\n\n return SendMessage(message.chat.id, 'FIN!')\n\n\n\nasync def on_startup(dp):\n logging.info(f\"set webhook - {WEBHOOK_URL_BASE} + {WEBHOOK_URL_PATH}\")\n await bot.set_webhook(url=WEBHOOK_URL_BASE + WEBHOOK_URL_PATH, certificate=open(WEBHOOK_SSL_CERT, 'r'))\n # insert code here to run it after start\n\n\nasync def on_shutdown(dp):\n logging.warning('Shutting down..')\n\n # insert code here to run it before shutdown\n\n # Remove webhook (not acceptable in some cases)\n await bot.delete_webhook()\n\n # Close DB connection (if used)\n await dp.storage.close()\n await dp.storage.wait_closed()\n\n logging.warning('Bye!')\n\n\nif __name__ == '__main__':\n\n context = ssl.SSLContext()\n context.load_cert_chain(WEBHOOK_SSL_CERT, WEBHOOK_SSL_PRIV)\n\n logging.info(f\"start_webhook: {WEBAPP_HOST}:{WEBAPP_PORT}{WEBHOOK_PATH}\")\n\n start_webhook(\n dispatcher=dp,\n webhook_path=WEBHOOK_PATH,\n on_startup=on_startup,\n on_shutdown=on_shutdown,\n skip_updates=False,\n host=WEBAPP_HOST,\n port=WEBAPP_PORT,\n ssl_context=context,\n )\n" } ]
3
andyj10224/test_suite
https://github.com/andyj10224/test_suite
bbeeb1f8c780f712db0c226df57317c5a2ce2cd9
df315489e8a4c0ce27a1077965aaf598881d1d5c
a0d2113dcbf0d07438d10a4e880ba183c54accf0
refs/heads/master
2023-05-04T01:24:48.315043
2021-05-20T20:39:49
2021-05-20T20:39:49
369,318,554
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.586614191532135, "alphanum_fraction": 0.6033828854560852, "avg_line_length": 38.41954040527344, "blob_id": "ef2ca4e8f7d9fb7fcd5c6ed3b57343b565eecb05", "content_id": "7465c0c5871ea4593e2d73f2a1561db313d3ba56", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6858, "license_type": "no_license", "max_line_length": 109, "num_lines": 174, "path": "/test_suite/testjet.py", "repo_name": "andyj10224/test_suite", "src_encoding": "UTF-8", "text": "import os\nimport sys\nimport databases.s22 as s22\nimport databases.watercluster as watercluster\n\ndef _gen_inputs_s22(testname, subset, template_path):\n if not os.path.isdir(testname):\n os.system(f'mkdir {testname}')\n \n test_dir_path = os.path.join(testname, subset)\n if not os.path.isdir(test_dir_path):\n os.system(f'mkdir {test_dir_path}')\n \n s22_path = os.path.join(test_dir_path, 's22')\n if not os.path.isdir(s22_path):\n os.system(f'mkdir {s22_path}')\n\n input_dir_path = os.path.join(s22_path, 'inputs')\n if not os.path.isdir(input_dir_path):\n os.system(f'mkdir {input_dir_path}')\n\n template_file = open(template_path, 'r')\n template_lines = template_file.readlines()\n\n for k, v in s22.dimer.items():\n input_file_path = os.path.join(input_dir_path, f's22_dimer_{k}.in')\n input = open(input_file_path, 'w')\n for line in template_lines:\n if '### MOLECULE ###' in line:\n input.write(f'{v}\\n')\n else:\n input.write(line)\n\n for k, v in s22.monoA.items():\n input_file_path = os.path.join(input_dir_path, f's22_monoA_{k}.in')\n input = open(input_file_path, 'w')\n for line in template_lines:\n if '### MOLECULE ###' in line:\n input.write(f'{v}\\n')\n else:\n input.write(line)\n \n for k, v in s22.monoB.items():\n input_file_path = os.path.join(input_dir_path, f's22_monoB_{k}.in')\n input = open(input_file_path, 'w')\n for line in template_lines:\n if '### MOLECULE ###' in line:\n input.write(f'{v}\\n')\n else:\n input.write(line)\n\ndef _gen_inputs_watercluster(testname, subset, template_path, atomlimit=300):\n if not os.path.isdir(testname):\n os.system(f'mkdir {testname}')\n \n test_dir_path = os.path.join(testname, subset)\n if not os.path.isdir(test_dir_path):\n os.system(f'mkdir {test_dir_path}')\n \n watercluster_path = os.path.join(test_dir_path, 'watercluster')\n if not os.path.isdir(watercluster_path):\n os.system(f'mkdir {watercluster_path}')\n\n input_dir_path = os.path.join(watercluster_path, 'inputs')\n if not os.path.isdir(input_dir_path):\n os.system(f'mkdir {input_dir_path}')\n\n template_file = open(template_path, 'r')\n template_lines = template_file.readlines()\n\n for k, v in watercluster.clusters.items():\n if k > atomlimit:\n continue\n input_file_path = os.path.join(input_dir_path, f'watercluster_{k}_atoms.in')\n input = open(input_file_path, 'w')\n for line in template_lines:\n if '### MOLECULE ###' in line:\n input.write(f'{v}\\n')\n else:\n input.write(line)\n\ndef _run_watercluster(testname, subset, psipath, ncore, atomlimit=300):\n input_dir_path = os.path.join(testname, subset, 'watercluster', 'inputs')\n if not os.path.isdir(input_dir_path):\n raise Exception(\"You idiot! 
You have not made the inputs yet!!!\")\n    \n    output_dir_path = os.path.join(testname, subset, 'watercluster', 'outputs')\n    if not os.path.isdir(output_dir_path):\n        os.system(f'mkdir {output_dir_path}')\n\n    timer_dir_path = os.path.join(testname, subset, 'watercluster', 'timings')\n    if not os.path.isdir(timer_dir_path):\n        os.system(f'mkdir {timer_dir_path}')\n\n    for k, v in watercluster.clusters.items():\n        if k > atomlimit:\n            continue\n        input_file_path = os.path.join(input_dir_path, f'watercluster_{k}_atoms.in')\n        cmd1 = f'{psipath} -n {ncore} {input_file_path}'\n        cmd2 = f'mv {input_dir_path}/watercluster_{k}_atoms.out {output_dir_path}/watercluster_{k}_atoms.out'\n        cmd3 = f'mv timer.dat {timer_dir_path}/watercluster_{k}_atoms.time'\n        os.system(f'{cmd1} && {cmd2} && {cmd3}')\n\ndef _run_s22(testname, subset, psipath, ncore):\n\n    input_dir_path = os.path.join(testname, subset, 's22', 'inputs')\n    if not os.path.isdir(input_dir_path):\n        raise Exception(\"You idiot! You have not made the inputs yet!!!\")\n    \n    output_dir_path = os.path.join(testname, subset, 's22', 'outputs')\n    if not os.path.isdir(output_dir_path):\n        os.system(f'mkdir {output_dir_path}')\n\n    timer_dir_path = os.path.join(testname, subset, 's22', 'timings')\n    if not os.path.isdir(timer_dir_path):\n        os.system(f'mkdir {timer_dir_path}')\n\n    for k, v in s22.dimer.items():\n        input_file_path = os.path.join(input_dir_path, f's22_dimer_{k}.in')\n        cmd1 = f'{psipath} -n {ncore} {input_file_path}'\n        cmd2 = f'mv {input_dir_path}/s22_dimer_{k}.out {output_dir_path}/s22_dimer_{k}.out'\n        cmd3 = f'mv timer.dat {timer_dir_path}/s22_dimer_{k}.time'\n        os.system(f'{cmd1} && {cmd2} && {cmd3}')\n\n    for k, v in s22.monoA.items():\n        input_file_path = os.path.join(input_dir_path, f's22_monoA_{k}.in')\n        cmd1 = f'{psipath} -n {ncore} {input_file_path}'\n        cmd2 = f'mv {input_dir_path}/s22_monoA_{k}.out {output_dir_path}/s22_monoA_{k}.out'\n        cmd3 = f'mv timer.dat {timer_dir_path}/s22_monoA_{k}.time'\n        os.system(f'{cmd1} && {cmd2} && {cmd3}')\n    \n    for k, v in s22.monoB.items():\n        input_file_path = os.path.join(input_dir_path, f's22_monoB_{k}.in')\n        cmd1 = f'{psipath} -n {ncore} {input_file_path}'\n        cmd2 = f'mv {input_dir_path}/s22_monoB_{k}.out {output_dir_path}/s22_monoB_{k}.out'\n        cmd3 = f'mv timer.dat {timer_dir_path}/s22_monoB_{k}.time'\n        os.system(f'{cmd1} && {cmd2} && {cmd3}')\n\ndef gen_input_files(database, testname, subset, template_path):\n    if database.lower() == 's22':\n        _gen_inputs_s22(testname, subset, template_path)\n    elif database.lower() == 'watercluster':\n        _gen_inputs_watercluster(testname, subset, template_path)\n    else:\n        raise Exception(f\"Database {database} is currently not available!\")\n\ndef run_jobs(database, testname, subset, psipath, ncore):\n    if os.path.isfile('timer.dat'):\n        os.system('rm timer.dat')\n\n    if database.lower() == 's22':\n        _run_s22(testname, subset, psipath, ncore)\n    elif database.lower() == 'watercluster':\n        # Pass the Psi4 path and core count through to the runner\n        _run_watercluster(testname, subset, psipath, ncore)\n    else:\n        raise Exception(f\"Database {database} is currently not available!\")\n\nif __name__ == '__main__':\n    args = sys.argv\n    mode = args[1]\n    database = args[2]\n    method = args[3]\n    subset = args[4]\n    template = args[5]\n    psipath = args[6]\n    ncore = int(args[7])\n\n    # Example call to the script:\n    # python testjet.py generate s22 linK reference template/ref_template.in (full path of jeff4) 8\n\n    if mode.lower() == 'generate':\n        gen_input_files(database, method, subset, template)\n    elif mode.lower() == 'run':\n        run_jobs(database, method, subset, psipath, 
ncore)" } ]
1
PedroAfonsoMarques/TSC_Exam
https://github.com/PedroAfonsoMarques/TSC_Exam
f51bce0e928950d7527307af82eafaa6e9c2c981
88ba759aaf45450d6f8bb2af5710e0ab2fe6289f
07edc8ce3e5086ab282853ad048b8cfba7815441
refs/heads/master
2023-04-07T13:03:19.542468
2021-04-13T16:15:09
2021-04-13T16:15:09
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5720614194869995, "alphanum_fraction": 0.5805670619010925, "avg_line_length": 41.19132614135742, "blob_id": "1c7c503804ff01db40cc03edb977ea7bf6792801", "content_id": "3f0e3229a675aaf6cd4e153b7ee5c19e28748e8d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 33860, "license_type": "no_license", "max_line_length": 137, "num_lines": 784, "path": "/smoothie.py", "repo_name": "PedroAfonsoMarques/TSC_Exam", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Apr 12 13:56:57 2021\r\n\r\n@author: pedro\r\n\"\"\"\r\n\r\n\"inverSe Method for nOisy nOn-isoThermal slosHIng Experiments (SMOOTHIE)\"\r\n\r\n#%% Packages\r\n\r\nimport time # Check process run times\r\nimport numpy as np # Multiple reasons\r\nfrom scipy.integrate import odeint # Integrate system of ODEs\r\nfrom scipy.optimize import minimize # Minimize function (optimizer)\r\nfrom sklearn.model_selection import train_test_split # Split train/test data\r\n\r\n#%% Fluid property functions\r\n\r\n# Obtain value from the coefficients in a polynomial fit\r\ndef value_from_coeffs(coeff, x_i):\r\n \"\"\"\r\n Obtain function value from the coefficients in a polynomial fit at x-value\r\n *x_i*.\r\n \r\n **Inputs**\r\n - *coeff*: array of coefficients for polynomial fit\r\n - *x_i*: value at which the polynomial fit is computed\r\n **Outputs:**\r\n - *y_i*: result of the polynomial fit at *x_i*\r\n \"\"\"\r\n # Initialize result with zero\r\n y_i = 0\r\n # Loop through the coefficients to build the result of the fit\r\n for i in range(0,len(coeff)):\r\n # Sum a_n*x^n for all the coefficients n\r\n y_i = coeff[i]*x_i**(len(coeff)-1-i) + y_i\r\n # Return value from the fitted polynomial\r\n return y_i\r\n\r\n# Obtain data point from the polynomial fit of data\r\ndef value_from_fit(x_data, y_data, n_pol, x_i):\r\n \"\"\"\r\n Obtain data point from the polynomial fit of data. The real data is fitted\r\n with a polynomial of n_pol order, and the property is evaluated at temperature\r\n (or other condition) *x_i*.\r\n \r\n **Inputs:**\r\n - *x_data*: data in the x-axis\r\n - *y_data*: data in the y-axis\r\n - *n_pol*: order of the polynomial\r\n - *x_i*: value at which the polynomial fit is computed\r\n **Outputs:**\r\n Result of the polynomial fit at *x_i*\r\n \"\"\"\r\n # Generate coefficients from polynomial fit\r\n coeff = np.polyfit(x=x_data,y=y_data,deg=n_pol)\r\n # Return the value from the fitted polynomial\r\n return value_from_coeffs(coeff, x_i)\r\n\r\n# Obtain fluid property value (from coefficients or constant value)\r\ndef property_value(y,x_i):\r\n \"\"\"\r\n Obtain fluid property value (from coefficients or constant value) for a\r\n given temperature (or other condition) *x_i*.\r\n \r\n **Inputs:**\r\n - *y*: Can be either a scalar or an array. If it is a scalar, then y corres-\r\n ponds to the actual property of the fluid. If it is an array, the it\r\n corresponds to the array of coefficients of the polynomial fit as a\r\n function of *x_i*\r\n - *x_i*: data in the x-axis for the polynomial fit (temperature [K])\r\n **Outputs:**\r\n Fluid property *y* at temperature *x_i*\r\n \"\"\"\r\n # If the input is a scalar, simply return it. 
This is used because there is\r\n    # no temperature information for some HFE7200 properties\r\n    if np.isscalar(y) == True: return y\r\n    # If the input is an array, calculate the property based on the poly coeffs\r\n    else: return value_from_coeffs(y, x_i)\r\n\r\n# Clausius-Clapeyron equation\r\ndef clausius_clapeyron_p(R_v,dh,p_sat_ref,T_sat_ref,T_sat):\r\n    \"\"\"\r\n    Clausius-Clapeyron equation that relates saturation temperature to saturation\r\n    pressure in a vapor phase given reference conditions *T_sat_ref* & *p_sat_ref*.\r\n    \r\n    **Inputs:**\r\n        - *R_v*: ideal gas constant for the vapor phase [J/kgK]\r\n        - *dh*: latent heat of vaporization/evaporation [J/kg]\r\n        - *p_sat_ref*: saturation pressure at reference conditions [Pa]\r\n        - *T_sat_ref*: saturation temperature at reference conditions [K]\r\n        - *T_sat*: saturation temperature [K]\r\n    **Outputs:**\r\n        Saturation pressure for saturation temperature T_sat [Pa]\r\n    \"\"\"\r\n    return p_sat_ref*np.exp( (dh/R_v) * (1/T_sat_ref - 1/T_sat) )\r\n\r\n# Compute mass and volume of the vapor/liquid from the temp. & pressure data\r\ndef get_mass_and_volumes(t,Tl,Tg,pg,inert,fluid,slosh,ma_0,ml_0):\r\n    \"\"\"\r\n    Compute mass and volume of the vapor/liquid from the input temperature and \r\n    pressure data. This is possible if initial inert gas and liquid mass are\r\n    known. In addition, the ideal gas law is used as an assumption in the\r\n    intermediate calculations.\r\n    \r\n    **Inputs:**\r\n        - *t*: time array information [s]\r\n        - *Tl*: liquid temperature [K]\r\n        - *Tg*: ullage temperature [K]\r\n        - *pg*: ullage pressure [Pa]\r\n        - *fluid*: object that contains all liquid and vapor properties\r\n        - *inert*: object that contains all inert gas properties\r\n        - *slosh*: object that contains all sloshing properties  \r\n        - *ma_0*: inert gas mass [kg]\r\n        - *ml_0*: liquid mass [kg]\r\n    **Outputs:**\r\n        - *mv*: vapor mass over time [kg]\r\n        - *ml*: liquid mass over time [kg]\r\n        - *Vl*: liquid volume over time [m3]\r\n        - *Vg*: vapor volume over time [m3]\r\n    \"\"\"\r\n    # Liquid density [kg/m3]\r\n    rho_l = np.zeros(len(t))\r\n    # Ideal gas constants for the vapor and inert gas [J/kg*K]\r\n    R_a = inert.R_a\r\n    R_v = fluid.R_v\r\n    # Compute liquid density for each time-step based on temperature [kg/m3]\r\n    for i in range(len(t)): rho_l[i] = fluid.get_liq_density(Tl[i])\r\n    # Vapor pressure - ideal gas mixture [Pa]\r\n    pv = pg - ma_0*R_a*Tg/slosh.V_g\r\n    # Vapor mass - ideal gas law [kg]\r\n    mv = pv*slosh.V_g/(R_v*Tg)\r\n    # Liquid mass [kg]\r\n    ml = np.zeros(len(t)); ml[0] = ml_0\r\n    # Compute liquid mass for each time-step based on evaporation/condensation\r\n    for i in range(len(t)-1): ml[i+1] = ml[i] - (mv[i+1] - mv[i]) \r\n    # Liquid volume [m3]\r\n    Vl = ml/rho_l\r\n    # Ullage volume [m3]\r\n    Vg = slosh.V - Vl\r\n    # Return ullage/liquid mass & volume\r\n    return mv, ml, Vl, Vg\r\n\r\n#%% Classes\r\n    \r\n# Fluid (composed of liquid + vapor)\r\nclass Fluid:\r\n    \"\"\"\r\n    Initialize fluid properties for the vapor and liquid phases. This class is\r\n    initialized by inserting the name of the working fluid (i.e. H2, N2 or \r\n    HFE7200) and importing its properties from a separate Python script.\r\n    The H2 and N2 properties were obtained from the NIST (National Institute of\r\n    Standards and Technology). 
The HFE7200 properties are more tricky to find,\r\n    so they were derived from three distinct sources:\r\n        - 3M Novec 7200 Engineered Fluid Product Information\r\n        - Pramod Warrier and Amyn S. Teja [2011]\r\n        - Rausch et al. [2015]\r\n    **Constant fluid properties:**\r\n        - *M_v*: Molar mass of the vapor species [kg/mol]\r\n        - *R_v*: Ideal gas constant of the vapor [J/kgK]\r\n        - *gamma*: Ratio of specific heats [-]\r\n        - *T_sat_ref*: Reference saturation temperature [K]\r\n        - *p_sat_ref*: Reference saturation pressure [Pa]\r\n    **Temperature-dependent properties:**\r\n        - *rho_l*: Polynomial coefficients for liquid density\r\n        - *k_l*: Polynomial coefficients for liquid thermal conductivity\r\n        - *cv_l*: Polynomial coefficients for liquid specific heat at constant volume\r\n        - *mu_l*: Polynomial coefficients for liquid dynamic viscosity\r\n        - *sigma*: Polynomial coefficients for the surface tension\r\n        - *k_v*: Polynomial coefficients for the vapor thermal conductivity\r\n        - *cv_v*: Polynomial coefficients for the vapor specific heat at constant volume\r\n        - *mu_v*: Polynomial coefficients for the vapor dynamic viscosity\r\n        - *dh*: Polynomial coefficients for the latent heat of vaporization/condensation\r\n    The properties were obtained in saturation conditions for different temperatures.\r\n    Then, they were fitted with a high order polynomial and the coefficients are\r\n    stored in each respective variable.\r\n    \"\"\"\r\n    def __init__(self,name):\r\n        print('Initializing fluid properties')\r\n        \r\n        # Initialization of the fluid properties at reference conditions\r\n        self.name = name\r\n        \r\n        # Import fluid properties from database\r\n        if   name == 'H2':      import Properties.h2_properties  as data\r\n        elif name == 'N2':      import Properties.n2_properties  as data\r\n        elif name == 'HFE7200': import Properties.hfe7200_properties as data\r\n        \r\n        # Molar mass for the vapor species [kg/mol]\r\n        self.M_v = data.M_v\r\n        # Ratio of specific heats [-]\r\n        self.gamma = data.gamma\r\n        # Reference saturation temperature [K]\r\n        self.T_sat_ref = data.T_sat_ref\r\n        \r\n        # Ideal gas constant [J/kg.K]\r\n        self.R_v = 8.3144626/self.M_v\r\n        \r\n        # Maximum and minimum allowable temperatures [K]\r\n        self.T_max = data.T_max\r\n        self.T_min = data.T_min\r\n        \r\n        # NIST Fluid procedure\r\n        if name == 'H2' or name == 'N2':\r\n            # Reference saturation pressure [Pa]\r\n            self.p_sat_ref = value_from_fit(x_data=data.T_v,\r\n                                            y_data=data.p_v,\r\n                                            n_pol=8,\r\n                                            x_i=self.T_sat_ref)\r\n            # Coefficients of the polynomials for the liquid properties\r\n            self.rho_l = np.polyfit(x=data.T_l,y=data.rho_l, deg=8)  # [kg/m3]\r\n            self.k_l   = np.polyfit(x=data.T_l,y=data.k_l,   deg=8)  # [W/mK]\r\n            self.cv_l  = np.polyfit(x=data.T_l,y=data.cv_l,  deg=8)  # [J/kgK]\r\n            self.mu_l  = np.polyfit(x=data.T_l,y=data.mu_l,  deg=8)  # [Pa*s]\r\n            self.sigma = np.polyfit(x=data.T_l,y=data.sigma, deg=8)  # [N/m]\r\n            # Coefficients of the polynomials for the gas properties\r\n            self.k_v  = np.polyfit(x=data.T_v,y=data.k_v,  deg=8)    # [W/mK]\r\n            self.cv_v = np.polyfit(x=data.T_v,y=data.cv_v, deg=8)    # [J/kgK]\r\n            self.mu_v = np.polyfit(x=data.T_v,y=data.mu_v, deg=8)    # [Pa*s]\r\n            # Latent heat of vaporization [J/kg]\r\n            self.dh = np.polyfit(x=data.T_v,\r\n                                 y=data.hg-data.hf,deg=8)\r\n        # HFE Fluid procedure\r\n        elif name == 'HFE7200':\r\n            # Reference saturation pressure [Pa]\r\n            self.p_sat_ref = data.p_sat_ref\r\n            # Coefficients of the polynomials for the liquid properties\r\n            self.rho_l = np.polyfit(x=data.T_rausch, y=data.rho_l, deg=4)  # [kg/m3]\r\n            self.k_l   = 
np.polyfit(x=data.T_warrier, y=data.k_l, deg=4)       # [W/mK]\r\n            self.cv_l  = data.cv_l                                         # [J/kgK]\r\n            self.mu_l  = np.polyfit(x=data.T_rausch, y=data.mu_l, deg=4)   # [Pa*s]\r\n            self.sigma = np.polyfit(x=data.T_rausch, y=data.sigma, deg=4)  # [N/m]\r\n            # Coefficients of the polynomials for the gas properties\r\n            self.k_v  = data.k_v                                           # [W/mK]\r\n            self.cv_v = data.cv_v                                          # [J/kgK]\r\n            self.mu_v = np.polyfit(x=data.T_rausch,y=data.mu_v, deg=8)     # [Pa*s]\r\n            # Latent heat of vaporization [J/kg]\r\n            self.dh = data.dh\r\n    \r\n    'Get liquid properties for temperature Tl'\r\n    def get_liq_properties(self,Tl):\r\n        \"\"\"\r\n        Compute liquid properties at a given temperature Tl. The input temperature\r\n        is bounded based on the maximum and minimum temperatures presented in the\r\n        fluid property database.\r\n        \r\n        **Inputs:**\r\n            - *Tl:* liquid temperature [K]\r\n        **Outputs:**\r\n            - Bounded liquid temperature [K]\r\n            - Liquid density [kg/m3]\r\n            - Liquid thermal conductivity [W/mK]\r\n            - Liquid specific heat at constant volume [J/kgK]\r\n            - Liquid dynamic viscosity [Pa.s]\r\n            - Surface tension [N/m]\r\n        \"\"\"\r\n        # Impose bounds on the temperature\r\n        if   Tl > self.T_max: Tl = self.T_max\r\n        elif Tl < self.T_min: Tl = self.T_min\r\n        # Return the values of the properties based on the temperature and fits\r\n        return Tl,property_value(self.rho_l, Tl),\\\r\n                  property_value(self.k_l,   Tl),\\\r\n                  property_value(self.cv_l,  Tl),\\\r\n                  property_value(self.mu_l,  Tl),\\\r\n                  property_value(self.sigma, Tl)\r\n    \r\n    'Get vapor properties for temperature Tg' \r\n    def get_vap_properties(self,Tg):\r\n        \"\"\"\r\n        Compute vapor properties at a given temperature Tg. The input temperature\r\n        is bounded based on the maximum and minimum temperatures presented in the\r\n        fluid property database.\r\n        \r\n        **Inputs:**\r\n            - *Tg:* ullage temperature [K]\r\n        **Outputs:**\r\n            - Bounded ullage temperature [K]\r\n            - Ideal gas constant for the vapor [J/kgK]\r\n            - Reference saturation temperature [K]\r\n            - Reference saturation pressure [Pa]\r\n            - Vapor thermal conductivity [W/mK]\r\n            - Vapor specific heat at constant volume [J/kgK]\r\n            - Vapor dynamic viscosity [Pa.s]\r\n            - Latent heat of vaporization/condensation [J/kg]\r\n        \"\"\"\r\n        # Impose bounds on the temperature\r\n        if   Tg > self.T_max: Tg = self.T_max\r\n        elif Tg < self.T_min: Tg = self.T_min\r\n        # Return the values of the properties based on the temperature and fits\r\n        return Tg, self.R_v, \\\r\n               self.T_sat_ref, self.p_sat_ref,\\\r\n               property_value(self.k_v,  Tg),\\\r\n               property_value(self.cv_v, Tg),\\\r\n               property_value(self.mu_v, Tg),\\\r\n               property_value(self.dh,   Tg)\r\n    \r\n    'Get liquid density for temperature Ti'\r\n    def get_liq_density(self,Ti):\r\n        \"\"\"\r\n        Compute liquid density at temperature Ti. The input temperature\r\n        is bounded based on the maximum and minimum temperatures presented in the\r\n        fluid property database.\r\n        \r\n        **Inputs:**\r\n            - *Ti:* temperature [K]\r\n        **Outputs:**\r\n            - Liquid density [kg/m3]\r\n        \"\"\"\r\n        # Impose bounds on the temperature\r\n        if   Ti > self.T_max: Ti = self.T_max\r\n        elif Ti < self.T_min: Ti = self.T_min\r\n        # Return the values of the properties based on the temperature and fits\r\n        return property_value(self.rho_l, Ti)\r\n    \r\n# Inert gas (used to pressurize the ullage)\r\nclass Inert:\r\n    \"\"\"\r\n    Initialize fluid properties for the inert gas phase. This class is\r\n    initialized by inserting the name of the gas (i.e. He or Air) and importing\r\n    its properties from a separate Python script. 
The properties were\r\n    obtained from the NIST (National Institute of Standards and Technology).\r\n    \r\n    **Constant fluid properties:**\r\n        - *R_a*: Ideal gas constant of the inert gas [J/kgK]\r\n        - *gamma*: Ratio of specific heats [-]\r\n    **Temperature-dependent properties:**\r\n        - *k*: Polynomial coefficients for the inert gas thermal conductivity\r\n        - *cv*: Polynomial coefficients for the inert gas specific heat at constant volume\r\n        - *mu*: Polynomial coefficients for the inert gas dynamic viscosity\r\n    The properties were obtained in saturation conditions for different temperatures.\r\n    Then, they were fitted with a high order polynomial and the coefficients are\r\n    stored in each respective variable.\r\n    \"\"\"\r\n    def __init__(self,name):\r\n        print('Initializing inert gas properties')\r\n        \r\n        # Initialization of the fluid properties at reference conditions\r\n        self.name = name\r\n        \r\n        # Import fluid properties from database\r\n        if   name == 'He':  import Properties.he_properties  as data\r\n        elif name == 'Air': import Properties.air_properties as data\r\n        \r\n        # Maximum and minimum allowable temperatures [K]\r\n        self.T_max = data.T_max\r\n        self.T_min = data.T_min\r\n        \r\n        # Ideal gas constant [J/kg.K]\r\n        self.R_a = data.R\r\n        # Ratio of specific heats [-]\r\n        self.gamma = data.gamma\r\n        # Thermal conductivity [W/mK]\r\n        self.k = np.polyfit(x=data.T, y=data.k, deg=4)\r\n        # Specific heat at constant volume [J/kgK]\r\n        self.cv = np.polyfit(x=data.T, y=data.cv, deg=4)\r\n        # Dynamic viscosity [Pa*s]\r\n        self.mu = np.polyfit(x=data.T, y=data.mu, deg=4)\r\n    \r\n    'Get inert gas properties for temperature Tg'\r\n    def get_gas_properties(self,Tg):\r\n        \"\"\"\r\n        Compute inert gas properties at temperature Tg. The input temperature\r\n        is bounded based on the maximum and minimum temperatures presented in the\r\n        fluid property database.\r\n        \r\n        **Inputs:**\r\n            - *Tg:* ullage temperature [K]\r\n        **Outputs:**\r\n            - Bounded ullage temperature [K]\r\n            - Ideal gas constant for the inert gas [J/kgK]\r\n            - Inert gas thermal conductivity [W/mK]\r\n            - Inert gas specific heat at constant volume [J/kgK]\r\n            - Inert gas dynamic viscosity [Pa.s]\r\n        \"\"\"\r\n        # Impose bounds on the temperature\r\n        if   Tg > self.T_max: Tg = self.T_max\r\n        elif Tg < self.T_min: Tg = self.T_min\r\n        # Return the values of the properties based on the temperature and fits\r\n        return Tg, self.R_a, property_value(self.k, Tg),\\\r\n                             property_value(self.cv, Tg),\\\r\n                             property_value(self.mu, Tg)\r\n\r\n# Sloshing tank\r\nclass Slosh:\r\n    \"\"\"\r\n    Class that contains the sloshing cell dimensions and excitation parameters.\r\n    \r\n    **Inputs:**\r\n        - *R*: cell radius [m]\r\n        - *H*: total cell height [m]\r\n        - *k_h*: cell fill ratio (liquid height over total height) [-]\r\n        - *k_w*: non-dimensional excitation frequency (*f*/*f11*) [-]\r\n        - *k_a*: non-dimensional excitation amplitude (*A0*/*R*) [-]\r\n    **Calculated variables:**\r\n        - *h*: liquid fill height [m]\r\n        - *V*: total cell volume [m3]\r\n        - *V_l*: liquid volume [m3]\r\n        - *V_g*: ullage volume [m3]\r\n        - *S_i*: cross-sectional area [m2]\r\n        - *w11*: natural frequency [rad/s]\r\n        - *f11*: natural frequency [Hz]\r\n        - *Omega*: excitation frequency [rad/s]\r\n        - *f*: excitation frequency [Hz]\r\n    \"\"\"\r\n    def __init__(self,R,H,k_h,k_w,k_a,g=9.8):\r\n        # Sloshing cell radius converted from [mm] to [m]\r\n        self.R = R*1e-3\r\n        # Sloshing cell height converted from [mm] to [m]\r\n        self.H = 
H*1e-3\r\n        # Fill ratio of the liquid over the total height [-]\r\n        self.k_h = k_h\r\n        # Non-dimensional excitation frequency [-]\r\n        self.k_w = k_w\r\n        # Non-dimensional excitation amplitude [-]\r\n        self.k_a = k_a\r\n        # Liquid fill height [m]\r\n        self.h = self.k_h*self.H\r\n        # Excitation amplitude [m]\r\n        self.A0 = self.k_a*self.R\r\n        # Natural frequency [rad/s]\r\n        self.w11 = np.sqrt((g*1.841/self.R)*np.tanh(1.841*self.h/self.R))\r\n        # Natural frequency [Hz]\r\n        self.f11 = self.w11/(2*np.pi)\r\n        # Excitation frequency [rad/s]\r\n        self.Omega = self.k_w*self.w11\r\n        # Excitation frequency [Hz]\r\n        self.f = self.Omega/(2*np.pi)\r\n        # Sloshing cell volume [m3]\r\n        self.V = np.pi*self.R*self.R*self.H\r\n        # Liquid volume [m3]\r\n        self.V_l = (self.h/self.H)*self.V\r\n        # Gas volume [m3]\r\n        self.V_g = self.V - self.V_l\r\n        # Cross-section area [m2]\r\n        self.Si = np.pi*self.R*self.R\r\n    \r\n    'Return sloshing cell dimensions'\r\n    def get_sloshing_cell_dimensions(self):\r\n        \"\"\"\r\n        Return sloshing cell dimensions:\r\n            - *R*: cell radius [m]\r\n            - *H*: total cell height [m]\r\n            - *h*: liquid height [m]\r\n            - *V*: total cell volume [m3]\r\n            - *Si*: cross-sectional area [m2]\r\n        \"\"\"\r\n        return self.R, self.H, self.h, self.V, self.Si\r\n    \r\n    'Return sloshing cell parameters for 0D model'\r\n    def get_sloshing_cell_params_0d(self):\r\n        \"\"\"\r\n        Return sloshing cell parameters required by the 0D model:\r\n            - *V_l*: liquid volume [m3]\r\n            - *V_g*: ullage volume [m3]\r\n            - *Si*: cross-sectional area [m2]\r\n        \"\"\"\r\n        return self.V_l, self.V_g, self.Si\r\n    \r\n    'Return excitation parameters'\r\n    def get_excitation_conditions(self):\r\n        \"\"\"\r\n        Return sloshing excitation conditions:\r\n            - *A0*: excitation amplitude [m]\r\n            - *f11*: natural frequency [Hz]\r\n            - *f*: excitation frequency [Hz]\r\n        \"\"\"\r\n        return self.A0, self.f11, self.f\r\n    \r\n# Define class of Inputs (variables OBTAINED DIRECTLY from the input data)\r\nclass Inputs:\r\n    \"\"\"\r\n    Class that groups all the input variables together.\r\n    \r\n    **Inputs:**\r\n        - *t*: time-array [s]\r\n        - *Tl*: liquid temperature [K]\r\n        - *Tg*: ullage temperature [K]\r\n        - *pg*: ullage pressure [Pa]\r\n    \"\"\"\r\n    def __init__(self,t,Tl,Tg,pg):\r\n        self.t = t    # Time [s]\r\n        self.Tl = Tl  # Liq temperature [K]\r\n        self.Tg = Tg  # Gas temperature [K]\r\n        self.pg = pg  # Gas pressure [Pa]\r\n\r\n# Define class of Derived Inputs (variables COMPUTED from the input data)\r\nclass Derived_Inputs:\r\n    \"\"\"\r\n    Class that groups all the variables derived from the input temperature and\r\n    pressure data.\r\n    \r\n    **Inputs:**\r\n        - *t*: time-array [s]\r\n        - *Tl*: liquid temperature [K]\r\n        - *Tg*: ullage temperature [K]\r\n        - *pg*: ullage pressure [Pa]\r\n        - *fluid*: object that contains all liquid and vapor properties\r\n        - *inert*: object that contains all inert gas properties\r\n        - *slosh*: object that contains all sloshing properties \r\n        - *ma_0*: initial mass of inert gas [kg]\r\n        - *ml_0*: initial mass of liquid [kg]\r\n    **Calculated variables:**\r\n        - *mv*: vapor mass evolution over time [kg]\r\n        - *ml*: liquid mass evolution over time [kg]\r\n        - *Vl*: liquid volume over time [m3]\r\n        - *Vg*: ullage volume over time [m3]\r\n    \"\"\"\r\n    def __init__(self,t,Tl,Tg,pg,inert,fluid,slosh,ma_0,ml_0):\r\n        self.mv, self.ml,self.Vl, self.Vg = \\\r\n        get_mass_and_volumes(t,Tl,Tg,pg,inert,fluid,slosh,ma_0,ml_0)\r\n\r\n#%% 0D model (ODE system)\r\n\r\n# 0D model for the temperature and pressure evolution\r\ndef 
model_0d(x,t,m_a,h_iL,h_m,V_l,V_g,fluid,inert,slosh):\r\n    \"\"\"\r\n    0D model that predicts the evolution of the thermodynamic system inside of\r\n    a closed reservoir when this is submitted to a sloshing excitation. The model\r\n    is composed of two regions: the liquid and the ullage. The latter is a mixture\r\n    between the vapor and the inert gas. The system is assumed to be closed with\r\n    adiabatic walls.\r\n    \r\n    The model receives inputs regarding the initial pressure and temperature\r\n    conditions as well as the heat and mass transfer coefficients between the\r\n    gas and liquid. More detailed information is available in Technical Note\r\n    TN5000-10-05 from the VKI cryogenics team.\r\n    \r\n    **Inputs:**\r\n        - *x*: initial conditions for the system of ODEs\r\n        - *t*: time interval [s]\r\n        - *m_a*: mass of inert gas [kg]\r\n        - *h_iL*: heat transfer coefficient [W/m2K]\r\n        - *h_m*: mass transfer coefficient [m/s]\r\n        - *V_l*: liquid volume [m3]\r\n        - *V_g*: ullage volume [m3]\r\n        - *fluid*: object that contains all liquid and vapor properties\r\n        - *inert*: object that contains all inert gas properties\r\n        - *slosh*: object that contains all sloshing properties  \r\n    **Outputs:**\r\n        - *dmvdt*: temporal rate of change of vapor mass [kg/s]\r\n        - *dTgdt*: temporal rate of change of ullage temperature [K/s]\r\n        - *dTldt*: temporal rate of change of liquid temperature [K/s]\r\n        - *dpgdt*: temporal rate of change of ullage pressure [Pa/s]\r\n    \"\"\"\r\n    \r\n    ### Initial conditions\r\n    m_v = x[0]; Tg = x[1]; Tl = x[2]; p_g = x[3]\r\n    \r\n    ### Import required properties/parameters\r\n    \r\n    # Liquid properties at temperature Tl\r\n    _, rho_l, k_l, cv_l, mu_l, sigma = fluid.get_liq_properties(Tl)\r\n    # Vapor properties at temperature Tg\r\n    _, R_v, T_sat_ref, p_sat_ref, k_v, cv_v, mu_v, dh = fluid.get_vap_properties(Tg)\r\n    # Inert gas properties at temperature Tg\r\n    _, R_a, k_a, cv_a, mu_a = inert.get_gas_properties(Tg)\r\n    \r\n    # Sloshing cell dimensions required by the 0D model\r\n    _, _, S_i = slosh.get_sloshing_cell_params_0d()\r\n    \r\n    ### Calculate required variables\r\n    \r\n    # Liquid mass at current time-step [kg]\r\n    m_l = rho_l*V_l\r\n    \r\n    # Vapor density at current time-step [kg/m3]\r\n    rho_v = m_v/V_g\r\n    # Inert gas density at current time-step [kg/m3]\r\n    rho_a = m_a/V_g\r\n    \r\n    # Thermal effusivity of the dry-air\r\n    b_a = np.sqrt(rho_a*cv_a*k_a)\r\n    # Thermal effusivity of the vapor\r\n    b_v = np.sqrt(rho_v*cv_v*k_v)\r\n    # Thermal effusivity of the ullage (mass-averaged)\r\n    b_g = (m_a*b_a + m_v*b_v)/(m_a + m_v)\r\n    # Thermal effusivity of the liquid\r\n    b_l = np.sqrt(rho_l*cv_l*k_l)\r\n    \r\n    # Interface temperature (semi-infinite body assumption) [K]\r\n    Ti = (b_g*Tg + b_l*Tl)/(b_g+b_l)\r\n    \r\n    # Liquid density in saturation conditions at current time-step [kg/m3]\r\n    rho_l_sat = fluid.get_liq_density(Ti)\r\n    \r\n    # Vapor phase pressure [Pa]\r\n    p_v = p_g - rho_a*R_a*Tg\r\n    # Vapor phase saturation pressure [Pa]\r\n    p_v_sat = clausius_clapeyron_p(R_v,dh,p_sat_ref,T_sat_ref,Ti)\r\n    \r\n    ### 0D Model Equations:\r\n    \r\n    # 1. Mass transfer at the interface\r\n    dmvdt = h_m*S_i*( p_v_sat/(R_v*Ti) - p_v/(R_v*Tg) )\r\n    \r\n    # 2. Liquid internal energy balance\r\n    dTldt = ((h_iL*S_i)/(m_l*cv_l))*(Ti - Tl) - (dmvdt/(m_l*cv_l))*( cv_l*np.abs(Ti-Tl) + p_v/rho_l_sat )\r\n    \r\n    # 3. Ullage internal energy balance\r\n    dTgdt = - ((h_iL*S_i)/(m_a*cv_a+m_v*cv_v))*(Ti - Tl) + (dmvdt/(m_a*cv_a+m_v*cv_v))*( cv_v*np.abs(Ti-Tg) + np.abs(dh) + p_v/rho_l_sat)\r\n    \r\n    # 4. 
Ullage pressure evolution\r\n    dpgdt = (1/V_g)*(m_a*R_a+m_v*R_v)*dTgdt + (R_v*Tg/V_g)*dmvdt\r\n    \r\n    return [dmvdt, dTgdt, dTldt, dpgdt] \r\n\r\n#%% Define cost function\r\n\r\ndef cost_function(X,t,inputs,derived_inputs,fluid,inert,slosh,ma_0,FOL_IN):\r\n    \"\"\"\r\n    Cost function that drives the optimization problem. The parameter we want to\r\n    minimize is the overall relative error between the input pressure/temperature\r\n    data and the predictions given by the 0D model.\r\n    The cost function is evaluated several times with different values of the\r\n    heat & mass transfer coefficients as inputs in order to obtain the minimum\r\n    error in the prediction.\r\n    \r\n    **Inputs:**\r\n        - *X*: initial estimate for the heat & mass transfer coefficients [W/m2K, m/s]\r\n        - *t*: time array [s]\r\n        - *inputs*: input data (liquid temp, ullage temp, ullage pressure)\r\n        - *derived_inputs*: variables computed from input data (vapor/liquid mass & volumes)\r\n        - *fluid*: object that contains all liquid and vapor properties\r\n        - *inert*: object that contains all inert gas properties\r\n        - *slosh*: object that contains all sloshing properties  \r\n        - *ma_0*: inert gas mass [kg]\r\n        - *FOL_IN*: input folder location\r\n    **Outputs**:\r\n        - *err*: relative error between real data and model predictions\r\n    \"\"\"\r\n    \r\n    # Function inputs\r\n    h_iL = X[0] # Heat transfer coefficient\r\n    h_m  = X[1] # Mass transfer coefficient\r\n    \r\n    ### Declare variable arrays solved by the ODE system\r\n    \r\n    # Initial conditions obtained from the real data\r\n    Tg = np.zeros(len(t)); Tg[0] = inputs.Tg[0]          # [K]\r\n    Tl = np.zeros(len(t)); Tl[0] = inputs.Tl[0]          # [K]\r\n    pg = np.zeros(len(t)); pg[0] = inputs.pg[0]          # [Pa]\r\n    mv = np.zeros(len(t)); mv[0] = derived_inputs.mv[0]  # [kg]\r\n    # Declare array for the liquid mass evolution\r\n    ml = np.zeros(len(t)); ml[0] = derived_inputs.ml[0]  # [kg]\r\n    # Declare array for ullage and liquid volume evolution\r\n    Vg = np.zeros(len(t)); Vg[0] = derived_inputs.Vg[0]  # [m3]\r\n    Vl = np.zeros(len(t)); Vl[0] = derived_inputs.Vl[0]  # [m3]\r\n    \r\n    print('Solving 0D model %s, %s for %s' %(fluid.name,inert.name,FOL_IN))\r\n    print('-> Heat transfer coeff.: %f' %h_iL)\r\n    print('-> Mass transfer coeff.: %f' %h_m)\r\n    \r\n    ### Run 0D model\r\n    \r\n    # Start timer for 0D model solver\r\n    start_time = time.time()\r\n    \r\n    # Solve 0D model with current estimate of the heat & mass transfer coeffs.\r\n    for i in range(len(t)-1):\r\n        # Time-step\r\n        ts = [t[i],t[i+1]]\r\n        \r\n        # Initial solution for the ODE\r\n        x0 = [mv[i],Tg[i],Tl[i],pg[i]]\r\n        \r\n        # Solve system of ODEs for the current time-step\r\n        x = odeint(model_0d, x0, ts, args=(ma_0,h_iL,h_m,Vl[i],Vg[i],fluid,inert,slosh))\r\n        \r\n        # Update vapor mass [kg]\r\n        mv[i+1] = x[1,0]\r\n        # Update ullage temperature [K]\r\n        Tg[i+1] = x[1,1]\r\n        # Update liquid temperature [K]\r\n        Tl[i+1] = x[1,2]\r\n        # Update ullage pressure [Pa]\r\n        pg[i+1] = x[1,3]\r\n        \r\n        # Update liquid mass [kg]\r\n        ml[i+1] = ml[i] - (mv[i+1] - mv[i])\r\n        # Update liquid volume [m3]\r\n        Vl[i+1] = ml[i+1]/fluid.get_liq_density(Tl[i+1])\r\n        # Update ullage volume [m3]\r\n        Vg[i+1] = slosh.V - Vl[i+1]\r\n    \r\n    ### Compute relative error between model predictions and input data\r\n    err_Tl = np.linalg.norm((Tl - inputs.Tl)/inputs.Tl)\r\n    err_Tg = np.linalg.norm((Tg - inputs.Tg)/inputs.Tg)\r\n    err_pg = np.linalg.norm((pg - inputs.pg)/inputs.pg)\r\n    \r\n    # Root-mean square sum of the temperature/pressure relative errors\r\n    err = np.sqrt(err_Tl**2 + err_Tg**2 + err_pg**2)\r\n    print('-> 
Overall error: %f' %err)\r\n    print('-> Elapsed time: %f' %(time.time() - start_time))\r\n    return err\r\n\r\n#%% Inverse method\r\ndef inverse_method(n_trials,p_test,t,Tg,Tl,pg,fluid,inert,slosh,ma_0,ml_0,X_0,optimizer_method,FOL_IN):\r\n    \"\"\"\r\n    Main function that is used to apply the inverse method. The goal is to\r\n    compute the heat & mass transfer coefficients *h_heat* and *h_mass* which\r\n    generate the temperature and pressure evolution observed in the input data.\r\n    In order to handle noise in the data, a bootstrapping approach is taken.\r\n    \r\n    This function generates a distribution/population for the heat and mass\r\n    transfer coefficients by comparing the input temperature & pressure evolution\r\n    with predictions given by the 0D model.\r\n    \r\n    **Inputs:**\r\n        - *n_trials*: number of optimization loops for bootstrapping\r\n        - *p_test*: ratio of testing data to total data\r\n        - *t*: input time array [s]\r\n        - *Tg*: input ullage temperature [K]\r\n        - *Tl*: input liquid temperature [K]\r\n        - *pg*: input ullage pressure [Pa]\r\n        - *fluid*: object that contains all liquid and vapor properties\r\n        - *inert*: object that contains all inert gas properties\r\n        - *slosh*: object that contains all sloshing properties  \r\n        - *ma_0*: initial inert gas mass [kg]\r\n        - *ml_0*: initial liquid mass [kg]\r\n        - *X_0*: initial estimate for heat & mass transfer coefficients [W/m2K,m/s]\r\n        - *optimizer_method*: method for optimizer function (uses scipy.minimize())\r\n        - *FOL_IN*: location where input data is stored\r\n    **Outputs**:\r\n        - *h_heat*: heat transfer coefficient distribution [W/m2K]\r\n        - *h_mass*: mass transfer coefficient distribution [m/s]\r\n    \"\"\"\r\n    # Initialize heat transfer coefficient population\r\n    h_heat = np.zeros(n_trials)\r\n    # Initialize mass transfer coefficient population\r\n    h_mass = np.zeros(n_trials)\r\n    \r\n    # Apply inverse method \"n_trials\" times\r\n    for j in range(n_trials):\r\n        \r\n        print('Split training and testing data')\r\n        \r\n        # Split arrays to have 70% training and 30% validation data\r\n        t_train, _,\\\r\n        Tg_train,_,\\\r\n        Tl_train,_,\\\r\n        pg_train,_ \\\r\n        = train_test_split(t,Tg,Tl,pg,test_size=p_test)\r\n        \r\n        # Unsorted training data\r\n        TRAIN_DATA = np.vstack((t_train,Tg_train,Tl_train,pg_train)).transpose()\r\n        # Sorted training data based on time\r\n        TRAIN_DATA = TRAIN_DATA[TRAIN_DATA[:,0].argsort()]\r\n        \r\n        # Split sorted train and testing arrays\r\n        t_train  = TRAIN_DATA[:,0] # [s]\r\n        Tg_train = TRAIN_DATA[:,1] # [K]\r\n        Tl_train = TRAIN_DATA[:,2] # [K]\r\n        pg_train = TRAIN_DATA[:,3] # [Pa]\r\n        \r\n        # Assign inputs & derived inputs that are used in the cost function\r\n        inputs         = Inputs(t_train,Tl_train,Tg_train,pg_train)\r\n        derived_inputs = Derived_Inputs(t_train,Tl_train,Tg_train,\r\n                                        pg_train,inert,fluid,slosh,\r\n                                        ma_0,ml_0)\r\n        \r\n        ### Optimization\r\n        print('%s Method' %(optimizer_method))\r\n        optimizer_time = time.time()\r\n        # Optimization function (built-in from scipy)\r\n        res = minimize(cost_function,   # cost function\r\n                       X_0,             # initial condition\r\n                       method = optimizer_method,\r\n                       args   = (t_train,inputs,derived_inputs,fluid,inert,slosh,ma_0,FOL_IN),\r\n                       options= {'ftol': 1e-6, 'disp': True})\r\n        # Print results and total elapsed time\r\n        print('%s Result: h_heat = %f & h_m = %f' %(optimizer_method,res.x[0],res.x[1]))\r\n        print('%s elapsed time: %f' %(optimizer_method, time.time() - optimizer_time))\r\n        # Store the computed coefficients\r\n        h_heat[j] = res.x[0] # [W/m2K]\r\n        h_mass[j] = res.x[1] # [m/s]\r\n    return 
h_heat, h_mass\r\n\r\n\r\n#%% End" }, { "alpha_fraction": 0.606572151184082, "alphanum_fraction": 0.6253657341003418, "avg_line_length": 31.040891647338867, "blob_id": "3baa8e2f6e550d50a6395390f819006b6183c7fe", "content_id": "718d08d99aa5ded7d350ff304fee9f3c57b15dd0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8886, "license_type": "no_license", "max_line_length": 96, "num_lines": 269, "path": "/inverse_trial_single_core.py", "repo_name": "PedroAfonsoMarques/TSC_Exam", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sun Mar 14 15:49:15 2021\r\n\r\n@author: Pedro Marques\r\n\"\"\"\r\n\r\n\"inverSe Method for nOisy nOn-isoThermal slosHIng Experiments (SMOOTHIE)\"\r\n\r\n#%% Packages\r\n\r\nimport numpy as np # Multiple reasons\r\nimport pandas as pd # Dataframe management for .csv & .txt files\r\nimport seaborn as sns # Plot smooth distribution\r\nimport matplotlib.pyplot as plt # Plots\r\nfrom matplotlib import gridspec # More control over subplot layout\r\n\r\n# Import Custom Classes \r\nfrom smoothie import Fluid,Inert,Slosh\r\n# Import Inverse Method function\r\nfrom smoothie import inverse_method\r\n\r\n#%% User INPUTS\r\n\r\nprint('Importing fluid properties')\r\n\r\n'Fluid and sloshing settings'\r\n# Fluids: 'H2', 'N2', 'HFE7200'\r\nfluid = Fluid(name='N2')\r\n# Inert gas: 'Air', 'He' \r\ninert = Inert(name='He')\r\n# Sloshing cell dimensions & excitations\r\n# R - radius [mm]\r\n# H - height [mm]\r\n# k_h - liquid fill ratio [-]\r\n# k_w - non-dimensional sloshing excitation [-]\r\n# k_a - non-dimensional sloshing amplitude [-]\r\nslosh = Slosh(R=40,H=104,k_h=0.7,k_w=0.8,k_a=0.06)\r\n\r\n'Input/Output data settings'\r\n# Folder where the input data is located\r\nFOL_IN = 'Input_Cryo_Data'\r\n# File that contains input data\r\nFILE_IN = 'model_data.txt'\r\n# Output folder location\r\nFOL_OUT = 'Output_Cryo_Data'\r\n# Save output data? 
[True/False]\r\nsave_OUT = False\r\n\r\n'Optimization settings'\r\n# Number of optimizations we will perform\r\nn_trials = 20\r\n# Percentage of validation/test data (default 30% validation and 70% training)\r\np_test = 0.3\r\n# Initial optimization conditions [heat-transfer coeff, mass-transfer coeff.]\r\nX_0 = [50,1e-4]\r\n# Select optimization method\r\noptimizer_method = 'Nelder-Mead'\r\n\r\n#%% Import data from 0D simulation\r\n\r\nprint('Importing temperature and pressure data')\r\n\r\n# Read simulation output file\r\ndf = pd.read_csv(format('%s/%s' %(FOL_IN,FILE_IN)))\r\n\r\n# Time-array [s]\r\nt = np.array(df['Time [s]'])\r\n# Ullage temperature [K]\r\nTg_clean = np.array(df['T_g [K]'])\r\n# Liquid temperature [K]\r\nTl_clean = np.array(df['T_l [K]'])\r\n# Interface temperature [K]\r\nTi_clean = np.array(df['T_i [K]'])\r\n# Ullage pressure [Pa]\r\npg_clean = np.array(df['p_g [Pa]'])\r\n\r\n# Initial liquid mass [kg]\r\nml_0 = np.array(df['ml [kg]'])[0]\r\n# Initial inert gas mass [kg]\r\nma_0 = np.array(df['ma [kg]'])[0]\r\n\r\n#%% Add random noise to the input data\r\n\r\n# Add Gaussian noise with mean 0 and variance 1\r\nTl = Tl_clean + 0.3*np.random.randn(len(t))\r\nTi = Ti_clean + 0.3*np.random.randn(len(t))\r\nTg = Tg_clean + 0.3*np.random.randn(len(t))\r\npg = pg_clean + 50*np.random.randn(len(t))\r\n\r\n# Create output folder if it does not exist\r\nif save_OUT == True:\r\n import os\r\n if not os.path.exists(FOL_OUT): os.mkdir(FOL_OUT)\r\n\r\n#%% Plot clean and noisy data for comparison\r\n\r\nprint('Plot temperature and pressure evolution')\r\n\r\n# Blue for cryogenic case\r\nif fluid.name == 'N2': color = 'C0'\r\n# Red for non-cryogenic case\r\nelif fluid.name == 'HFE7200': color = 'C3'\r\n\r\nplt.figure(figsize=(8,5))\r\ngs=gridspec.GridSpec(3, 2, width_ratios=[1, 1])\r\n\r\n# Ullage temperature\r\nplt.subplot(gs[0,0])\r\nplt.plot(t,Tg,linestyle='',marker='.',color=color,label='Noisy data')\r\nplt.plot(t,Tg_clean,linestyle='dashed',marker='',color='black',label='Clean data')\r\nplt.title('Ullage temperature')\r\nplt.ylabel('Temperature [K]')\r\nplt.xlabel('Time [s]')\r\nplt.xlim(t[0],t[-1])\r\nplt.grid()\r\nplt.legend()\r\nplt.tight_layout()\r\n\r\n# Interface temperature\r\nplt.subplot(gs[1,0])\r\nplt.plot(t,Ti,linestyle='',marker='.',color=color,label='Noisy data')\r\nplt.plot(t,Ti_clean,linestyle='dashed',marker='',color='black',label='Clean data')\r\nplt.title('Interface temperature')\r\nplt.ylabel('Temperature [K]')\r\nplt.xlabel('Time [s]')\r\nplt.xlim(t[0],t[-1])\r\nplt.legend()\r\nplt.grid()\r\nplt.tight_layout()\r\n\r\n# Liquid temperature\r\nplt.subplot(gs[2,0])\r\nplt.plot(t,Tl,linestyle='',marker='.',color=color,label='Noisy data')\r\nplt.plot(t,Tl_clean,linestyle='dashed',marker='',color='black',label='Clean data')\r\nplt.title('Liquid temperature')\r\nplt.ylabel('Temperature [K]')\r\nplt.xlabel('Time [s]')\r\nplt.xlim(t[0],t[-1])\r\nplt.legend()\r\nplt.grid()\r\nplt.tight_layout()\r\n\r\n# Ullage pressure\r\nplt.subplot(gs[:,1])\r\nplt.plot(t,pg/1e5,linestyle='',marker='.',color=color,label='Noisy data')\r\nplt.plot(t,pg_clean/1e5,linestyle='dashed',marker='',color='black',label='Clean data')\r\nplt.title('Ullage pressure')\r\nplt.ylabel('Pressure [bar]')\r\nplt.xlabel('Time [s]')\r\nplt.xlim(t[0],t[-1])\r\nplt.legend()\r\nplt.grid()\r\nplt.tight_layout()\r\nif save_OUT == True: plt.savefig('%s/noisy_data.pdf' %(FOL_OUT))\r\n\r\n#%% Inverse method for heat & mass transfer coeffs from temperature & pressure\r\n\r\nh_heat, h_mass = inverse_method(n_trials, # 
No. of trials for bootstrapping\r\n                                p_test,     # Ratio of testing data [default: 30%]\r\n                                t,          # Input data time array [s]\r\n                                Tg,         # Noisy ullage temperature [K]\r\n                                Tl,         # Noisy liquid temperature [K]\r\n                                pg,         # Noisy ullage pressure [Pa]\r\n                                fluid,      # Fluid properties class (liq. & vapour)\r\n                                inert,      # Inert gas properties class\r\n                                slosh,      # Sloshing cell & excitation properties class\r\n                                ma_0,ml_0,  # Initial inert gas & liquid masses [kg]\r\n                                X_0,        # Initial condition for heat & mass transf. coeffs\r\n                                optimizer_method, # Optimizer method\r\n                                FOL_IN      # Input folder\r\n                                )\r\n\r\n#%% Compute statistics\r\n\r\n# Uncertainty (95% confidence assuming normal distribution)\r\nh_heat_unc = 1.96*np.std(h_heat); h_mass_unc = 1.96*np.std(h_mass)\r\n\r\nprint('h_heat = %e +- %e' %(np.mean(h_heat, dtype=np.float64),h_heat_unc))\r\nprint('h_mass = %e +- %e' %(np.mean(h_mass, dtype=np.float64),h_mass_unc))\r\n\r\n# Normalized variables\r\nh_heat_norm = (h_heat - np.mean(h_heat, dtype=np.float64))/np.std(h_heat)\r\nh_mass_norm = (h_mass - np.mean(h_mass, dtype=np.float64))/np.std(h_mass)\r\n# Create dataset matrix\r\nX = np.vstack((h_heat_norm,h_mass_norm))\r\n# Sample mean estimator\r\nX_mean = np.sum(X,axis=1)/n_trials\r\n# Covariance matrix estimator\r\nX_cov = np.cov(X)\r\n\r\n#%% Plot normalized statistics\r\n\r\n### Normalized histograms\r\nplt.figure(figsize=(10,5))\r\nplt.subplot(121)\r\nplt.title('Normalized histograms')\r\nplt.hist(h_heat_norm,label=r'$z_1=\\frac{h_{heat}-\\mu_{h_{heat}}}{\\sigma_{h_{heat}}}$',alpha=0.5)\r\nplt.hist(h_mass_norm,label=r'$z_2=\\frac{h_{mass}-\\mu_{h_{mass}}}{\\sigma_{h_{mass}}}$',alpha=0.5)\r\nplt.xlabel(r'$z_1,z_2$')\r\nplt.ylabel(r'$p(z_1),p(z_2)$')\r\nplt.legend()\r\nplt.tight_layout()\r\n\r\n# Required for Multivariate Gaussian\r\nfrom scipy.stats import multivariate_normal\r\n\r\n# Create grid to plot Multivariate Gaussian\r\nX, Y = np.meshgrid(np.linspace(-3,3,300), np.linspace(-3,3,300))\r\nPOS = np.dstack((X, Y))\r\nRV = multivariate_normal(X_mean,X_cov)\r\n\r\n### Multivariate gaussian distribution\r\nplt.subplot(122)\r\nplt.title('Normalized Multivariate Gaussian Distribution')\r\nplt.contourf(X,Y,RV.pdf(POS),extend='both')\r\nplt.xlabel(r'$z_1$') # Normalized h_heat\r\nplt.ylabel(r'$z_2$') # Normalized h_mass\r\nplt.colorbar()\r\nplt.xlim(left=-3,right=3)\r\nplt.ylim(bottom=-3,top=3)\r\nplt.tight_layout()\r\nif save_OUT == True: plt.savefig('%s/stats_normalized.pdf' %(FOL_OUT))\r\n\r\n#%% Plot statistics\r\n\r\nplt.figure()\r\n# Histogram for heat transfer coefficient distribution [W/m2K]\r\nplt.subplot(211)\r\nsns.distplot(h_heat,\r\n             hist=True,\r\n             kde=True,\r\n             color=color,\r\n             hist_kws={'edgecolor':'black'},\r\n             kde_kws={'linewidth':2,'shade': True})\r\nplt.xlabel(r'Heat transfer coefficient [W/m$^2$K]')\r\nplt.ylabel('Frequency')\r\nplt.title(r'Probability density function for $h_{heat}$')\r\nplt.yticks([]) \r\nplt.tight_layout()\r\nplt.subplot(212)\r\n# Histogram for mass transfer coefficient distribution [m/s]\r\nsns.distplot(h_mass,\r\n             hist=True,\r\n             kde=True,\r\n             color=color,\r\n             hist_kws={'edgecolor':'black'},\r\n             kde_kws={'linewidth':2,'shade': True})\r\nplt.xlabel(r'Mass transfer coefficient [m/s]')\r\nplt.ylabel('Frequency')\r\nplt.title(r'Probability density function for $h_{mass}$')\r\nplt.ticklabel_format(axis=\"x\", style=\"sci\", scilimits=(0,0))\r\nplt.ticklabel_format(axis=\"y\", style=\"sci\", scilimits=(0,0))\r\nplt.yticks([]) \r\nplt.tight_layout()\r\nif save_OUT == True: plt.savefig('%s/stats.pdf' %(FOL_OUT))\r\n\r\n#%% Save 
population\r\nif save_OUT == True: \r\n    # Header name for the .txt file\r\n    header_name='h_heat[W/m2K],h_mass[m/s]'\r\n    data = np.zeros((n_trials,2))\r\n    data[:,0] = h_heat\r\n    data[:,1] = h_mass\r\n    np.savetxt(format('%s/coeffs_population.txt' %FOL_OUT),\r\n               data,\r\n               delimiter=\",\",\r\n               comments='',\r\n               header=header_name)" }, { "alpha_fraction": 0.7465155124664307, "alphanum_fraction": 0.7554357647895813, "avg_line_length": 56.86021423339844, "blob_id": "43472869fda08583fd82faa0030bf251c4d95010", "content_id": "c927b1773b5cde783beb8e8994e34829c2e3cff8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 5381, "license_type": "no_license", "max_line_length": 720, "num_lines": 93, "path": "/Documentation/index.rst", "repo_name": "PedroAfonsoMarques/TSC_Exam", "src_encoding": "UTF-8", "text": ".. TSC Exam documentation master file, created by\n   sphinx-quickstart on Tue Apr 13 00:56:40 2021.\n   You can adapt this file completely to your liking, but it should at least\n   contain the root `toctree` directive.\n\nTSC Exam April 2021: Pedro Marques\n==================================\n\nThis documentation describes the SMOOTHIE (inverSe Method for nOisy nOn-isoThermal slosHIng Experiments) module that was developed in order to compute heat and mass transfer coefficients from non-isothermal sloshing data. \n\nThe direct application of this work is related to the sloshing of cryogenic propellants in the upper stages of modern spacecraft. Before take-off, the propellant tanks are pressurized, causing thermal stratification to develop in the gas and liquid regions. As a consequence of sloshing, thermal mixing is observed between both fluid phases. This generates thermal gradients near the interface which can trigger evaporation or condensation effects. The balance of the aforementioned effects during the propelled flight phase of modern spacecraft results in undesirable pressure fluctuations, which can cause structural instabilities and thrust oscillations.\n\nThe current version of this code relies on synthetically generated time-resolved input data for the temperature and pressure evolution. This data was obtained from a 0D model developed in Technical Note TN5000-10-05 of the VKI cryogenics team, with known values for the heat and mass transfer coefficients. 
Thus, the purpose of this code is to verify if the inverse method works in predicting these coefficients from simply analysing the input data, and what is the associated uncertainty when noise is added to the signals.\n\nWhat's included?\n----------------\n\nThe folder structure of this code is shown below:\n\n| **TSC_Exam_2021**\n| |_ Input_Cryo_Data\n| |____ 0D_data.pdf\n| |____ coeffs.txt\n| |____ model_data.txt\n| |_ Output_Cryo_Data\n| |____ coeffs_population.txt\n| |____ noisy_data.pdf\n| |____ stats.pdf\n| |____ stats_normalized.pdf\n| |_ Properties\n| |____ NIST\n| |________ Ar.txt\n| |________ GN2.txt\n| |________ H2.txt\n| |________ He.txt\n| |________ N2.txt\n| |________ O2.txt\n| |____ air_properties.py\n| |____ h2_properties.py\n| |____ he_properties.py\n| |____ hfe7200_properties.py\n| |____ n2_properties.py\n| |_ inverse_trial_single_core.py\n| |_ README.txt\n| |_ smoothie.py\n\nThe *Input_Cryo_Data* directory contains three files:\n\n* **0D_data.pdf**: plots for the input temperature and pressure data (synthetic data)\n* **coeffs.txt**: values of the heat and mass transfer coefficients used to generate the input synthetic data\n* **model_data.txt**: text file which contains the time-resolved input data (total pressure, liquid temperature, interface temperature, ullage temperature, initial liquid mass and initial inert gas mass)\n\nThe *Output_Cryo_Data* directory contains four files which are generated after running the *inverse_trial_single_core.py* script. These files are pre-included in the git repository since the code can take several hours to run. The content of these files is now described:\n\n* **coeffs_population.txt**: population of heat and mass transfer coefficients produced by the *inverse_trial_single_core.py* code\n* **noisy_data.pdf**: since the input data is synthetically generated, random noise is added as a form of pre-processing. This file shows the noisy data considered for the computation of the heat & mass transfer coefficients.\n* **stats.pdf**: plots showing the histograms and probability density functions of the heat and mass transfer coefficients estimated by the inverse method\n* **stats_normalized.pdf**: plots showing the normalized histograms and probability density functions of the heat and mass transfer coefficients, alongside the Multivariate Gaussian\n\nThe *Properties* directory contains one subdirectory named *NIST* and five additional python scripts. The *NIST* subdirectory contains text files with material properties for different fluids (i.e. Argon, Helium, Nitrogen, Oxygen) that were downloaded from the NIST database. The Python scripts in the *Properties* directory are used to compute the fluid properties required while running the *inverse_trial_single_core.py* code. Separate files were created for each fluid in order to allow for greater flexibility in accounting for different sources of information, or different approaches (e.g. N2, H2 and He are all obtained from the NIST database, whereas the HFE7200 properties are obtained from different sources).\n\nThe *inverse_trial_single_core.py* script is the main file that should be executed in order to determine the heat and mass transfer coefficients from the input data. The main outline of this script is explained step-by-step in the **Tutorial** section of the documentation.\n\nFinally, *smoothie.py* is the module that contains all the specific functions and classes required for the application of the inverse method to the non-isothermal sloshing problem. 
The full description of this module and its members is included in the **SMOOTHIE Module** section.\n\nRequirements:\n-------------\n\nThe following packages are required to run the code successfully:\n\n* numpy\n* time\n* matplotlib\n* scipy\n* sklearn\n* seaborn\n\nContents:\n---------\n\n.. toctree::\n   :maxdepth: 2\n\n   tutorial\n   project\n   code\n\nIndices and tables\n==================\n\n* :ref:`genindex`\n* :ref:`modindex`\n* :ref:`search`\n" }, { "alpha_fraction": 0.7608018517494202, "alphanum_fraction": 0.7689762711524963, "avg_line_length": 66.50666809082031, "blob_id": "11539f1fd9bf7a068c50088f2ceda00c5bb9eb8b", "content_id": "0101764ed26748339625bcb13393797e99689ccd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 5140, "license_type": "no_license", "max_line_length": 710, "num_lines": 75, "path": "/README.txt", "repo_name": "PedroAfonsoMarques/TSC_Exam", "src_encoding": "UTF-8", "text": "TSC Exam April 2021: Pedro Marques\r\n==================================\r\n\r\nThis documentation describes the SMOOTHIE (inverSe Method for nOisy nOn-isoThermal slosHIng Experiments) module that was developed in order to compute heat and mass transfer coefficients from non-isothermal sloshing data.\r\n\r\nThe direct application of this work is related to the sloshing of cryogenic propellants in the upper stages of modern spacecraft. Before take-off, the propellant tanks are pressurized, leading to a thermal stratification to set in the gas and liquid regions. As a consequence of sloshing, thermal mixing is observed between both fluid phases. This generates thermal gradients near the interface which can trigger evaporation or condensation effects. The balance of the aforementioned effects during the propelled flight phase of modern spacecraft results in undesirable pressure fluctuations, which can cause structural instabilities and thrust oscillations.\r\n\r\nThe current version of this code relies on synthetically generated time-resolved input data for the temperature and pressure evolution. This data was obtained from a 0D model developed in Technical Note TN5000-10-05 of the VKI cryogenics team, with known values for the heat and mass transfer coefficients. Thus, the purpose of this code is to verify if the inverse method works in predicting these coefficients from simply analysing the input data, and what is the associated uncertainty when noise is added to the signals.\r\n\r\n\r\nWhat’s included?\r\n----------------\r\n\r\nThe folder structure of this code is shown below:\r\n\r\nTSC_Exam_2021\r\n|_ Input_Cryo_Data\r\n|____ 0D_data.pdf\r\n|____ coeffs.txt\r\n|____ model_data.txt\r\n|_ Output_Cryo_Data\r\n|____ coeffs_population.txt\r\n|____ noisy_data.pdf\r\n|____ stats.pdf\r\n|____ stats_normalized.pdf\r\n|_ Properties\r\n|____ NIST\r\n|________ Ar.txt\r\n|________ GN2.txt\r\n|________ H2.txt\r\n|________ He.txt\r\n|________ N2.txt\r\n|________ O2.txt\r\n|____ air_properties.py\r\n|____ h2_properties.py\r\n|____ he_properties.py\r\n|____ hfe7200_properties.py\r\n|____ n2_properties.py\r\n|_ Documentation_Index\r\n|_ inverse_trial_single_core.py\r\n|_ README.txt\r\n|_ smoothie.py\r\n\r\nThe Input_Cryo_Data directory contains three files:\r\n\r\n\t1. 0D_data.pdf: plots for the input temperature and pressure data (synthetic data)\r\n\t2. coeffs.txt: values of the heat and mass transfer coefficients used to generate the input synthetic data\r\n\t3. 
model_data.txt: text file which contains the time-resolved input data (total pressure, liquid temperature, interface temperature, ullage temperature, initial liquid mass and initial inert gas mass)\r\n\r\nThe Output_Cryo_Data directory contains four files which are generated after running the inverse_trial_single_core.py script. These files are pre-included in the git repository since the code can take several hours to run. The content of these files is now described:\r\n\r\n\t1. coeffs_population.txt: population of heat and mass transfer coefficients produced by the inverse_trial_single_core.py code\r\n\t2. noisy_data.pdf: since the input data is synthetically generated, random noise is added as a form of pre-processing. This file shows the noisy data considered for the computation of the heat & mass transfer coefficients.\r\n\t3. stats.pdf: plots showing the histograms and probability density functions of the heat and mass transfer coefficients estimated by the inverse method\r\n\t4. stats_normalized.pdf: plots showing the normalized histograms and probability density functions of the heat and mass transfer coefficients, alongside the Multivariate Gaussian\r\n\r\nThe Properties directory contains one subdirectory named NIST and five additional python scripts. The NIST subdirectory contains text files with material properties for different fluids (i.e. Argon, Helium, Nitrogen, Oxygen) that were downloaded from the NIST database. The Python scripts in the Properties directory are used to compute the fluid properties required while running the inverse_trial_single_core.py code. Separate files were created for each fluid in order to allow for greater flexibility in accounting for different sources of information, or different approaches (e.g. N2, H2 and He are all obtained from the NIST database, whereas the HFE7200 properties are obtained from different sources).\r\n\r\nThe inverse_trial_single_core.py script is the main file that should be executed in order to determine the heat and mass transfer coefficients from the input data. The main outline of this script is explained step-by-step in the Tutorial section of the documentation.\r\n\r\nFinally, smoothie.py is the module that contains all the specific functions and classes required for the application of the inverse method to the non-isothermal sloshing problem. 
The full description of this module and its members is included in the SMOOTHIE Module section.\r\n\r\nRequirements:\r\n-------------\r\n\r\nThe following packages are required to run the code successfully:\r\n\r\n    numpy\r\n    time\r\n    matplotlib\r\n    scipy\r\n    sklearn\r\n    seaborn\r\n\r\nRefer to the documentation file \"Documentation_Index\" (or Documentation/_build/html/index.html) for more detailed and complete information.\r\n" }, { "alpha_fraction": 0.412189245223999, "alphanum_fraction": 0.6006415486335754, "avg_line_length": 28.439023971557617, "blob_id": "5b5ebe4b30a15f0900d62cef04049ca7d4ed3491", "content_id": "a4cf7a7e27cdb07309f4205c459399186cc5b005", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2494, "license_type": "no_license", "max_line_length": 107, "num_lines": 82, "path": "/Properties/hfe7200_properties.py", "repo_name": "PedroAfonsoMarques/TSC_Exam", "src_encoding": "UTF-8", "text": "\"\"\"\r\nHFE7200 LIQUID AND VAPOUR PROPERTIES\r\n------------------------------------------------------------------------------\r\nSOURCES:\r\n------------------------------------------------------------------------------\r\n- Pramod Warrier and Amyn S. Teja (2011), Viscosity, and Thermal Conductivity\r\nof Mixtures of 1-Ethoxy-1,1,2,2,3,3,4,4,4-nonafluorobutane (HFE 7200) with\r\nMethanol and 1-Ethoxybutane\r\n\r\n- Rausch et. al (2015), Density, Surface Tension, and Kinematic Viscosity of\r\nHydrofluoroethers HFE-7000, HFE-7100, HFE-7200, HFE-7300, and HFE-7500\r\n\r\n- 3M Novec 7200 Engineered Fluid Product Information\r\n------------------------------------------------------------------------------\r\n\"\"\"\r\n\r\nimport numpy as np\r\n\r\n#%% Classes\r\n\r\n#%% Rausch 2015 data\r\n\r\n# Temperature (K) values considered by Rausch\r\nT_rausch = np.array([273.15,283.15,293.15,303.15,313.15,323.15,333.15,343.15,353.15,363.15,373.15])\r\n\r\n# Liquid density (kg/m3)\r\nrho_l = np.array([1478.07,1456.13,1433.91,1411.31,1388.26,1364.68,1340.47,1315.57,1289.87,1263.31,1235.79])\r\n\r\n# Vapour density (kg/m3)\r\nrho_v = np.array([0.61,0.96,1.47,2.18,3.15,4.47,6.22,8.5,11.44,15.19,19.94])\r\n\r\n# Vapour dynamic viscosity (Pa*s)\r\nmu_v = np.array([9.33,9.68,10.03,10.4,10.77,11.15,11.53,11.91,12.3,12.68,13.05])*10**(-6)\r\n\r\n# Liquid kinematic viscosity (m2/s)\r\nnu_l = np.array([0.6631,0.5563,0.4824,0.4292,0.3822,0.3429,0.3125,0.2826,0.2564,0.2382,0.2225])*10**(-6)\r\n\r\n# Liquid dynamic viscosity (Pa*s)\r\nmu_l = nu_l * rho_l\r\n\r\n# Surface tension (N/m)\r\nsigma = np.array([16.03,14.9,14.01,13.21,12.33,11.42,10.56,9.67,8.84,7.99,7.19])*10**(-3)\r\n\r\n#%% Warrier 2011 data\r\n\r\n# Temperature values considered by Warrier\r\nT_warrier = np.array([278.8,300.6,314.1,328.3,344.1])\r\n\r\n# Liquid thermal conductivity (W/mK)\r\nk_l = np.array([0.0712,0.0644,0.0616,0.059,0.0563])\r\n\r\n#%% 3M Novec HFE7200 Product information data\r\n\r\n# Enthalpy of vaporization\r\ndh = 30 * 4186.8 # J/kg\r\n\r\n# Liquid specific heat capacity (Cp = Cv = C)\r\ncv_l = 0.29 * 4186.8 # J/kgK\r\n\r\n# Saturation temperature at reference conditions\r\nT_sat_ref = 298.15 # K\r\n\r\n# Vapour pressure at reference conditions (T = 278.15 K)\r\np_sat_ref = 109 * 133.322 # Pa\r\n\r\n#%% Additional information\r\n\r\n# Molar mass of HFE7200\r\nM_v = 264e-03 # kg/mol\r\n\r\n# Ratio of specific heats\r\ngamma = 1.24\r\n\r\n# Vapor thermal conductivity\r\nk_v = 0.0112 # W/mK\r\n\r\n# Vapor specific heat at constant volume\r\ncv_v = 856.3 # J/kgK 
(HFE7000)\r\n\r\n#%% Maximum and minimum temperatures\r\nT_max = np.max(T_rausch)\r\nT_min = np.min(T_rausch)" }, { "alpha_fraction": 0.4920470118522644, "alphanum_fraction": 0.5501382946968079, "avg_line_length": 31.264368057250977, "blob_id": "4736e068ba64acc0ab201a6d3eea7420c48facc3", "content_id": "f43a3fae6d6fc54d55dcb9650f5319ecf81e958c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2892, "license_type": "no_license", "max_line_length": 77, "num_lines": 87, "path": "/Properties/air_properties.py", "repo_name": "PedroAfonsoMarques/TSC_Exam", "src_encoding": "UTF-8", "text": "import numpy as np\r\nimport pandas as pd\r\n\r\n#%% Fluid choice and desired pressure\r\n\r\nclass N2:\r\n    # Molar fraction in air\r\n    x = 0.78084\r\n    pass\r\nclass O2:\r\n    # Molar fraction in air\r\n    x = 0.20946\r\n    pass\r\nclass Ar:\r\n    # Molar fraction in air\r\n    x = 0.00934\r\n    pass\r\n\r\n# NIST Properties column\r\n    # 00 - Temperature (K)\r\n    # 01 - Pressure (MPa)\r\n    # 02 - GAS Density\r\n    # 03 - GAS Specific volume\r\n    # 04 - GAS Internal Energy\r\n    # 05 - GAS Enthalpy\r\n    # 06 - GAS Entropy \r\n    # 07 - GAS Cv\r\n    # 08 - GAS Cp\r\n    # 09 - GAS Sound speed\r\n    # 10 - GAS Joule-Thomson\r\n    # 11 - GAS Dynamic viscosity\r\n    # 12 - GAS Thermal conductivity\r\n    # 13 - GAS Phase\r\n\r\n#%% Import file properties into data frame\r\n\r\n### N2\r\ndata_frame = pd.read_csv('Properties/NIST/GN2.txt', sep='\t',dtype=np.float64)\r\nN2.T = data_frame.values[:,0] # Saturation temperature (K)\r\nN2.p = data_frame.values[:,1]*1e6 # Saturation pressure (MPa)\r\nN2.rho = data_frame.values[:,2] # kg/m3\r\nN2.hf = data_frame.values[:,5]*1e3 # J/kg\r\nN2.cv = data_frame.values[:,7]*1e3 # J/kgK\r\nN2.cp = data_frame.values[:,8]*1e3 # J/kgK\r\nN2.mu = data_frame.values[:,11] # Pa*s\r\nN2.k = data_frame.values[:,12] # W/mK\r\n\r\n### O2\r\ndata_frame = pd.read_csv('Properties/NIST/O2.txt', sep='\t',dtype=np.float64)\r\nO2.T = data_frame.values[:,0] # Saturation temperature (K)\r\nO2.p = data_frame.values[:,1]*1e6 # Saturation pressure (MPa)\r\nO2.rho = data_frame.values[:,2] # kg/m3\r\nO2.hf = data_frame.values[:,5]*1e3 # J/kg\r\nO2.cv = data_frame.values[:,7]*1e3 # J/kgK\r\nO2.cp = data_frame.values[:,8]*1e3 # J/kgK\r\nO2.mu = data_frame.values[:,11] # Pa*s\r\nO2.k = data_frame.values[:,12] # W/mK\r\n\r\n### 
Ar\r\ndata_frame = pd.read_csv('Properties/NIST/Ar.txt', sep='\t',dtype=np.float64)\r\nAr.T = data_frame.values[:,0] # Saturation temperature (K)\r\nAr.p = data_frame.values[:,1]*1e6 # Saturation pressure (MPa)\r\nAr.rho = data_frame.values[:,2] # kg/m3\r\nAr.hf = data_frame.values[:,5]*1e3 # J/kg\r\nAr.cv = data_frame.values[:,7]*1e3 # J/kgK\r\nAr.cp = data_frame.values[:,8]*1e3 # J/kgK\r\nAr.mu = data_frame.values[:,11] # Pa*s\r\nAr.k = data_frame.values[:,12] # W/mK\r\n\r\n# Same temperature array for all test cases\r\nT = N2.T \r\n\r\n## Air properties from average composition of air\r\nrho = N2.x*N2.rho + O2.x*O2.rho + Ar.x*Ar.rho\r\ncv = N2.x*N2.cv + O2.x*O2.cv + Ar.x*Ar.cv\r\ncp = N2.x*N2.cp + O2.x*O2.cp + Ar.x*Ar.cp\r\nmu = N2.x*N2.mu + O2.x*O2.mu + Ar.x*Ar.mu\r\nk = N2.x*N2.k + O2.x*O2.k + Ar.x*Ar.k\r\n\r\n#%% Additional properties\r\n\r\nR = 8.3144626/(28.97e-03) # J/kg.K \r\ngamma = 1.4\r\n\r\n#%% Maximum and minimum temperatures\r\nT_max = np.max(T)\r\nT_min = np.min(T)" }, { "alpha_fraction": 0.6781652569770813, "alphanum_fraction": 0.6929222941398621, "avg_line_length": 37.06271743774414, "blob_id": "aa3a2cc0ee5a9001dcf485ff213e24d67d101fae", "content_id": "4950b38e6391bdbd6a38c7d118991511c169d43b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 10639, "license_type": "no_license", "max_line_length": 397, "num_lines": 287, "path": "/Documentation/tutorial.rst", "repo_name": "PedroAfonsoMarques/TSC_Exam", "src_encoding": "UTF-8", "text": "Tutorial\n========\n\nThis section describes the application of the SMOOTHIE module presented in the *inverse_trial_single_core.py* script.\nThe first step is to import the required packages, which is shown in the code block below. ::\n\t\n\timport numpy as np # Multiple reasons\n\timport pandas as pd # Dataframe management for .csv & .txt files\n\timport seaborn as sns # Plot smooth distribution\n\timport matplotlib.pyplot as plt # Plots\n\tfrom matplotlib import gridspec # More control over subplot layout\n\n\t# Import Custom Classes \n\tfrom smoothie import Fluid,Inert,Slosh\n\t# Import Inverse Method function\n\tfrom smoothie import inverse_method\n\nUser settings\n-------------\n\nThen, the user should load the fluid properties for the liquid, the vapor and the inert gas phases. This is achieved with the *Fluid()* and *Inert()* classes loaded from the SMOOTHIE module. In addition, the sloshing container and excitation conditions must also be specified by creating an object from the *Slosh()* class. ::\n\n\t'Fluid and sloshing settings'\n\t# Fluids: 'H2', 'N2', 'HFE7200'\n\tfluid = Fluid(name='N2')\n\t# Inert gas: 'Air', 'He' \n\tinert = Inert(name='He')\n\t# Sloshing cell dimensions & excitations\n\t# R - radius [mm]\n\t# H - height [mm]\n\t# k_h - liquid fill ratio [-]\n\t# k_w - non-dimensional sloshing excitation [-]\n\t# k_a - non-dimensional sloshing amplitude [-]\n\tslosh = Slosh(R=40,H=104,k_h=0.7,k_w=0.8,k_a=0.06)\n\nThe directory where the input data is stored *FOL_IN* should be specified alongside the input file name *FILE_IN*. The user can also specify if the output of the script should be saved or not *save_OUT = True/False* and the corresponding save location *FOL_OUT*. ::\n\n\t'Input/Output data settings'\n\t# Folder where the input data is located\n\tFOL_IN = 'Input_Cryo_Data'\n\t# File that contains input data\n\tFILE_IN = 'model_data.txt'\n\t# Output folder location\n\tFOL_OUT = 'Output_Cryo_Data'\n\t# Save output data? 
[True/False]\n\tsave_OUT = False\n\nThe final group of settings is related to the optimization settings that must be carried out to compute the missing coefficients. Here the total number of optimizations *n_trials* can be specified, as well as the percentage of validation/training data *p_test*, initial estimates for the heat and mass transfer coefficients *X_0* and, finally, which optimizer *optimizer_method* should be used. :: \n\n\t'Optimization settings'\n\t# Number of optimizations we will perform\n\tn_trials = 20\n\t# Percentage of validation/test data (default 30% validation and 70% training)\n\tp_test = 0.3\n\t# Initial optimization conditions [heat-transfer coeff, mass-transfer coeff.]\n\tX_0 = [50,1e-4]\n\t# Select optimization method\n\toptimizer_method = 'Nelder-Mead'\n\t\nPre-processing\n--------------\n\nThe pre-processing consists of importing the time-resolved temperature and pressure data and introducing random noise in order to simulate the disturbances that come from experimental measurement techniques. ::\n\n\t#%% Import data from 0D simulation\n\n\t# Read simulation output file\n\tdf = pd.read_csv(format('%s/%s' %(FOL_IN,FILE_IN)))\n\t\n\t# Time-array [s]\n\tt = np.array(df['Time [s]'])\n\t# Ullage temperature [K]\n\tTg_clean = np.array(df['T_g [K]'])\n\t# Liquid temperature [K]\n\tTl_clean = np.array(df['T_l [K]'])\n\t# Interface temperature [K]\n\tTi_clean = np.array(df['T_i [K]'])\n\t# Ullage pressure [Pa]\n\tpg_clean = np.array(df['p_g [Pa]'])\n\n\t# Initial liquid mass [kg]\n\tml_0 = np.array(df['ml [kg]'])[0]\n\t# Initial inert gas mass [kg]\n\tma_0 = np.array(df['ma [kg]'])[0]\n\n\t#%% Add random noise to the input data\n\t# Add Gaussian noise with mean 0 and variance 1\n\tTl = Tl_clean + 0.3*np.random.randn(len(t))\n\tTi = Ti_clean + 0.3*np.random.randn(len(t))\n\tTg = Tg_clean + 0.3*np.random.randn(len(t))\n\tpg = pg_clean + 50*np.random.randn(len(t))\n\nThe noisy data can then be plotted in order to check if the synthetic signal-to-noise ratio is at an acceptable level. 
::\n\n\t# Blue for cryogenic case\n\tif fluid.name == 'N2': color = 'C0'\n\t# Red for non-cryogenic case\n\telif fluid.name == 'HFE7200': color = 'C3'\n\n\tplt.figure(figsize=(8,5))\n\tgs=gridspec.GridSpec(3, 2, width_ratios=[1, 1])\n\n\t# Ullage temperature\n\tplt.subplot(gs[0,0])\n\tplt.plot(t,Tg,linestyle='',marker='.',color=color,label='Noisy data')\n\tplt.plot(t,Tg_clean,linestyle='dashed',marker='',color='black',label='Clean data')\n\tplt.title('Ullage temperature')\n\tplt.ylabel('Temperature [K]')\n\tplt.xlabel('Time [s]')\n\tplt.xlim(t[0],t[-1])\n\tplt.grid()\n\tplt.legend()\n\tplt.tight_layout()\n\n\t# Interface temperature\n\tplt.subplot(gs[1,0])\n\tplt.plot(t,Ti,linestyle='',marker='.',color=color,label='Noisy data')\n\tplt.plot(t,Ti_clean,linestyle='dashed',marker='',color='black',label='Clean data')\n\tplt.title('Interface temperature')\n\tplt.ylabel('Temperature [K]')\n\tplt.xlabel('Time [s]')\n\tplt.xlim(t[0],t[-1])\n\tplt.legend()\n\tplt.grid()\n\tplt.tight_layout()\n\n\t# Liquid temperature\n\tplt.subplot(gs[2,0])\n\tplt.plot(t,Tl,linestyle='',marker='.',color=color,label='Noisy data')\n\tplt.plot(t,Tl_clean,linestyle='dashed',marker='',color='black',label='Clean data')\n\tplt.title('Liquid temperature')\n\tplt.ylabel('Temperature [K]')\n\tplt.xlabel('Time [s]')\n\tplt.xlim(t[0],t[-1])\n\tplt.legend()\n\tplt.grid()\n\tplt.tight_layout()\n\n\t# Ullage pressure\n\tplt.subplot(gs[:,1])\n\tplt.plot(t,pg/1e5,linestyle='',marker='.',color=color,label='Noisy data')\n\tplt.plot(t,pg_clean/1e5,linestyle='dashed',marker='',color='black',label='Clean data')\n\tplt.title('Ullage pressure')\n\tplt.ylabel('Pressure [bar]')\n\tplt.xlabel('Time [s]')\n\tplt.xlim(t[0],t[-1])\n\tplt.legend()\n\tplt.grid()\n\tplt.tight_layout()\n\tif save_OUT == True: plt.savefig('%s/noisy_data.pdf' %(FOL_OUT))\n\nInverse-method\n--------------\n\nThe algorithm for the computation of the 'unknown' coefficients from the temperature and pressure data is accomplished through the *inverse_method()* function from the SMOOTHIE module. Refer to the **SMOOTHIE Module** section of the documentation for an in-depth analysis of the different elements which drive this approach. ::\n\n\t#%% Inverse method for heat & mass transfer coeffs from temperature & pressure\n\n\th_heat, h_mass = inverse_method(n_trials, # No. of trials for bootstrapping\n\t\t\t\t\tp_test, # Ratio of testing data [default: 30%]\n\t\t\t\t\tt, # Input data time array [s]\n\t\t\t\t\tTg, # Noisy ullage temperature [K]\n\t\t\t\t\tTl, # Noisy liquid temperature [K]\n\t\t\t\t\tpg, # Noisy ullage pressure [Pa]\n\t\t\t\t\tfluid, # Fluid properties class (liq. & vapour)\n\t\t\t\t\tinert, # Inert gas properties class\n\t\t\t\t\tslosh, # Sloshing cell & excitation properties class\n\t\t\t\t\tma_0,ml_0, # Initial inert gas & liquid mass [kg]\n\t\t\t\t\tX_0, # Initial condition for heat & mass transf. coeffs\n\t\t\t\t\toptimizer_method, # Optimizer method\n\t\t\t\t\tFOL_IN # Input folder\n\t\t\t\t\t)\n\nThe obtained heat and mass transfer population can be stored for later use/analysis. 
::\n\n\tif save_OUT == True: \n\t\t# Header name for the .txt file\n\t\theader_name='h_heat[W/m2K],h_mass[m/s]'\n\t\tdata = np.zeros((n_trials,2))\n\t\tdata[:,0] = h_heat\n\t\tdata[:,1] = h_mass\n\t\tnp.savetxt(format('%s/coeffs_population.txt' %FOL_OUT),\n\t\t\t\t\t data,\n\t\t\t\t\t delimiter=\",\",\n\t\t\t\t\t comments='',\n\t\t\t\t\t header=header_name)\n\nCompute statistics of the population\n------------------------------------\n\nThe previous step generates a population/distribution for the possible heat and mass transfer coefficients which minimize the error between the model predictions and the noisy input data. \nAs a form of analysing these distributions, the following quantities are computed:\n\n* Uncertainty in each coefficient (assuming Gaussian distributions) for a 95% confidence interval\n* Mean prediction for each coefficient\n* Covariance between the heat and mass transfer coefficients\n\nThese processes are shown in the code block below. ::\n\n\t#%% Compute statistics\n\n\t# Uncertainty (95% confidence assuming normal distribution)\n\th_heat_unc = 1.96*np.std(h_heat); h_mass_unc = 1.96*np.std(h_mass)\n\n\tprint('h_heat = %e +- %e' %(np.mean(h_heat, dtype=np.float64),h_heat_unc))\n\tprint('h_mass = %e +- %e' %(np.mean(h_mass, dtype=np.float64),h_mass_unc))\n\n\t# Normalized variables\n\th_heat_norm = (h_heat - np.mean(h_heat, dtype=np.float64))/np.std(h_heat)\n\th_mass_norm = (h_mass - np.mean(h_mass, dtype=np.float64))/np.std(h_mass)\n\t# Create dataset matrix\n\tX = np.vstack((h_heat_norm,h_mass_norm))\n\t# Sample mean estimator\n\tX_mean = np.sum(X,axis=1)/n_trials\n\t# Covariance matrix estimator\n\tX_cov = np.cov(X)\n\nPlot statistics of the population\n---------------------------------\n\nThe analysis can be represented graphically as shown below (in normalized and non-normalized forms). 
::\n\n\t#%% Plot normalized statistics\n\t\n\t### Normalized histograms\n\tplt.figure(figsize=(10,5))\n\tplt.subplot(121)\n\tplt.title('Normalized histograms')\n\tplt.hist(h_heat_norm,label=r'$z_1=\\frac{h_{heat}-\\mu_{h_{heat}}}{\\sigma_{h_{heat}}}$',alpha=0.5)\n\tplt.hist(h_mass_norm,label=r'$z_2=\\frac{h_{mass}-\\mu_{h_{mass}}}{\\sigma_{h_{mass}}}$',alpha=0.5)\n\tplt.xlabel(r'$z_1,z_2$')\n\tplt.ylabel(r'$p(z_1),p(z_2)$')\n\tplt.legend()\n\tplt.tight_layout()\n\n\t# Required for Multivariate Gaussian\n\tfrom scipy.stats import multivariate_normal\n\n\t# Create grid to plot Multivariate Gaussian\n\tX, Y = np.meshgrid(np.linspace(-3,3,300), np.linspace(-3,3,300))\n\tPOS = np.dstack((X, Y))\n\tRV = multivariate_normal(X_mean,X_cov)\n\n\t### Multivariate gaussian distribution\n\tplt.subplot(122)\n\tplt.title('Normalized Multivariate Gaussian Distribution')\n\tplt.contourf(X,Y,RV.pdf(POS),extend='both')\n\tplt.xlabel(r'$z_1$') # Normalized h_heat\n\tplt.ylabel(r'$z_2$') # Normalized h_mass\n\tplt.colorbar()\n\tplt.xlim(left=-3,right=3)\n\tplt.ylim(bottom=-3,top=3)\n\tplt.tight_layout()\n\tif save_OUT == True: plt.savefig('%s/stats_normalized.pdf' %(FOL_OUT))\n\n\t#%% Plot non-normalized statistics\n\t\n\tplt.figure()\n\t# Histogram for heat transfer coefficient distribution [W/m2K]\n\tplt.subplot(211)\n\tsns.distplot(h_heat,\n\t\t\t hist=True,\n\t\t\t kde=True,\n\t\t\t color=color,\n\t\t\t hist_kws={'edgecolor':'black'},\n\t\t\t kde_kws={'linewidth':2,'shade': True})\n\tplt.xlabel(r'Heat transfer coefficient [W/m$^2$K]')\n\tplt.ylabel('Frequency')\n\tplt.title(r'Probability density function for $h_{heat}$')\n\tplt.yticks([]) \n\tplt.tight_layout()\n\tplt.subplot(212)\n\t# Histogram for mass transfer coefficient distribution [m/s]\n\tsns.distplot(h_mass,\n\t\t\t hist=True,\n\t\t\t kde=True,\n\t\t\t color=color,\n\t\t\t hist_kws={'edgecolor':'black'},\n\t\t\t kde_kws={'linewidth':2,'shade': True})\n\tplt.xlabel(r'Mass transfer coefficient [m/s]')\n\tplt.ylabel('Frequency')\n\tplt.title(r'Probability density function for $h_{mass}$')\n\tplt.ticklabel_format(axis=\"x\", style=\"sci\", scilimits=(0,0))\n\tplt.ticklabel_format(axis=\"y\", style=\"sci\", scilimits=(0,0))\n\tplt.yticks([]) \n\tplt.tight_layout()\n\tif save_OUT == True: plt.savefig('%s/stats.pdf' %(FOL_OUT))\n\t\n" }, { "alpha_fraction": 0.5181102156639099, "alphanum_fraction": 0.5677165389060974, "avg_line_length": 26.266666412353516, "blob_id": "556fd61914e30a44af139f804c5d8cd42ec554f7", "content_id": "d648eed1a3c04cd83a19d787dca46bd98547ddad", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1270, "license_type": "no_license", "max_line_length": 69, "num_lines": 45, "path": "/Properties/he_properties.py", "repo_name": "PedroAfonsoMarques/TSC_Exam", "src_encoding": "UTF-8", "text": "import numpy as np\r\nimport pandas as pd\r\n\r\n#%% Fluid choice and desired pressure\r\n\r\nfolder = 'Properties/NIST/'\r\nname = \"He.txt\"\r\n\r\n# NIST Properties column\r\n    # 00 - Temperature (K)\r\n    # 01 - Pressure (MPa)\r\n    # 02 - GAS Density\r\n    # 03 - GAS Specific volume\r\n    # 04 - GAS Internal Energy\r\n    # 05 - GAS Enthalpy\r\n    # 06 - GAS Entropy \r\n    # 07 - GAS Cv\r\n    # 08 - GAS Cp\r\n    # 09 - GAS Sound speed\r\n    # 10 - GAS Joule-Thomson\r\n    # 11 - GAS Dynamic viscosity\r\n    # 12 - GAS Thermal conductivity\r\n    # 13 - GAS Phase\r\n\r\n#%% Import file properties into data frame\r\ndata_frame = pd.read_csv(folder+name, sep='\t',dtype=np.float64)\r\n\r\n# Gas properties (NIST)\r\nT = 
data_frame.values[:,0] # Saturation temperature (K)\r\np = data_frame.values[:,1]*1e6 # Saturation pressure (MPa)\r\nrho = data_frame.values[:,2] # kg/m3\r\nhf = data_frame.values[:,5]*1e3 # J/kg\r\ncv = data_frame.values[:,7]*1e3 # J/kgK\r\ncp = data_frame.values[:,8]*1e3 # J/kgK\r\nmu = data_frame.values[:,11] # Pa*s\r\nk = data_frame.values[:,12] # W/mK\r\n\r\n#%% Additional properties\r\n\r\nR = 8.3144626/(4e-03) # J/kg.K \r\ngamma = 1.66\r\n\r\n#%% Maximum and minimum temperatures\r\nT_max = np.max(T)\r\nT_min = np.min(T)" }, { "alpha_fraction": 0.5162407755851746, "alphanum_fraction": 0.5625811815261841, "avg_line_length": 31.014286041259766, "blob_id": "4736e068ba64ad2b0b4cbd93d04b5373ef4e50b45", "content_id": "a4cf7a7e27cdb07309f4205c459399186cc5b005", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2309, "license_type": "no_license", "max_line_length": 74, "num_lines": 70, "path": "/Properties/n2_properties.py", "repo_name": "PedroAfonsoMarques/TSC_Exam", "src_encoding": "UTF-8", "text": "import numpy as np\r\nimport pandas as pd\r\n\r\n#%% Fluid choice and desired pressure\r\n\r\nfolder = 'Properties/NIST/'\r\nname = \"N2.txt\"\r\n\r\n# NIST Properties column\r\n    # 00 - Temperature (K)\r\n    # 01 - Pressure (MPa)\r\n    # 02 - LIQUID Density\r\n    # 03 - LIQUID Specific volume\r\n    # 04 - LIQUID Internal Energy\r\n    # 05 - LIQUID Enthalpy\r\n    # 06 - LIQUID Entropy \r\n    # 07 - LIQUID Cv\r\n    # 08 - LIQUID Cp\r\n    # 09 - LIQUID Sound speed\r\n    # 10 - LIQUID Joule-Thomson\r\n    # 11 - LIQUID Dynamic viscosity\r\n    # 12 - LIQUID Thermal conductivity\r\n    # 13 - LIQUID Surface tension\r\n    # 14 - GAS Density\r\n    # 15 - GAS Specific volume\r\n    # 16 - GAS Internal energy\r\n    # 17 - GAS Enthalpy\r\n    # 18 - GAS Entropy \r\n    # 19 - GAS Cv\r\n    # 20 - GAS Cp\r\n    # 21 - GAS Sound speed\r\n    # 22 - GAS Joule-Thomson\r\n    # 23 - GAS Dynamic viscosity\r\n    # 24 - GAS Thermal conductivity\r\n\r\n#%% Import file properties into data frame\r\ndata_frame = pd.read_csv(folder+name, sep='\t',dtype=np.float64)\r\n\r\n# Liquid properties (NIST)\r\nT_l = data_frame.values[:,0] # Saturation temperature (K)\r\np_l = data_frame.values[:,1]*1e6 # Saturation pressure (Pa)\r\nrho_l = data_frame.values[:,2] # kg/m3\r\nhf = data_frame.values[:,5]*1e3 # J/kg\r\ncv_l = data_frame.values[:,7]*1e3 # J/kgK\r\ncp_l = data_frame.values[:,8]*1e3 # J/kgK\r\nmu_l = data_frame.values[:,11] # Pa*s\r\nk_l = data_frame.values[:,12] # W/mK\r\nsigma = data_frame.values[:,13] # N/m\r\n\r\n# Gas properties (NIST)\r\nT_v = data_frame.values[:,0] # Saturation temperature (K)\r\np_v = data_frame.values[:,1]*1e6 # Saturation pressure (Pa)\r\nrho_v = data_frame.values[:,14] # kg/m3\r\nhg = data_frame.values[:,17]*1e3 # J/kg\r\ncv_v = data_frame.values[:,19]*1e3 # J/kgK\r\ncp_v = data_frame.values[:,20]*1e3 # J/kgK\r\nmu_v = data_frame.values[:,23] # Pa*s\r\nk_v = data_frame.values[:,24] # W/mK\r\n\r\n#%% Assign additional constants to the fluid\r\n# Vapor molar mass\r\nM_v = 28e-03 # kg/mol\r\n# Ratio of specific heats\r\ngamma = 1.4\r\n# Reference saturation temperature\r\nT_sat_ref = 77.35 # K\r\n\r\n#%% Maximum and minimum temperatures\r\nT_max = np.max(T_l)\r\nT_min = np.min(T_l)" }, { "alpha_fraction": 0.5492957830429077, "alphanum_fraction": 0.5492957830429077, "avg_line_length": 13.199999809265137, "blob_id": "bd6927b0085343005d741d23062b3dff434a3541", "content_id": "bd761fe4449060d17a02e3cc7a3d7500f5aab4d4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": 
"reStructuredText", "length_bytes": 71, "license_type": "no_license", "max_line_length": 24, "num_lines": 5, "path": "/Documentation/code.rst", "repo_name": "PedroAfonsoMarques/TSC_Exam", "src_encoding": "UTF-8", "text": "SMOOTHIE Module\n===============\n\n.. automodule:: smoothie\n :members:\n" } ]
11
courtneymcbeth/pianobot
https://github.com/courtneymcbeth/pianobot
6c2ec7e52b39153edacf1dd8f2245090274a5cf9
c33548de0981f46f0280e86aa604811db75f5575
c2e2ede899c2bb8823c5bec8cd591048c739a267
refs/heads/master
2020-07-04T09:30:20.105043
2019-08-18T18:32:13
2019-08-18T18:32:13
202,241,488
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.463941752910614, "alphanum_fraction": 0.5332393646240234, "avg_line_length": 24.957317352294922, "blob_id": "7c8e897309a0c4873a8a0ffe4a4bac17e580a661", "content_id": "dcea535e147b54c084dda1980e484b38dbf5c4e4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 4257, "license_type": "no_license", "max_line_length": 74, "num_lines": 164, "path": "/piano.ino", "repo_name": "courtneymcbeth/pianobot", "src_encoding": "UTF-8", "text": "/*\n * Info: keys range from 21 to 108. Mapping from left to right:\n * 0: 21 - 35 (sheild pins 0-14)\n * 1: 36 - 50 (sheild pins 0-14)\n * 2: 51 - 65 (sheild pins 0-14)\n * 3: 66 - 80 (sheild pins 0-14)\n * 4: 81 - 95 (sheild pins 0-14)\n * 5: 96 - 108 (sheild pins 0-12)\n */\n\n#include <Wire.h>\n#include <Adafruit_PWMServoDriver.h>\n\n// sheild controllers from left to right\nAdafruit_PWMServoDriver pwm[] = {\n Adafruit_PWMServoDriver(&Wire, 0x40),\n Adafruit_PWMServoDriver(&Wire, 0x41),\n Adafruit_PWMServoDriver(&Wire, 0x42),\n Adafruit_PWMServoDriver(&Wire, 0x43),\n Adafruit_PWMServoDriver(&Wire, 0x44),\n Adafruit_PWMServoDriver(&Wire, 0x45)\n};\n\n// stores the MIDI key values of sharps and flats\nint blackKeys[] = {22, 25, 27, 30, 32, 34, 37, 39, 42, 44, 46, 49, \n 51, 54, 56, 58, 61, 63, 66, 68, 70, 73, 75, 78, 80, 82, 85, 87, \n 90, 92, 94, 97, 99, 102, 104, 106};\n\n// 1 if key is pressed, 0 otherwise\nint keyPosition[88];\n\n// Servo controls\n#define SERVOMIN 300 // min pulse length for white keys\n#define SERVOMAX 450 // max pulse length for white keys\n#define BLACKMIN 400 // min pulse length for black keys\n#define BLACKMAX 550 // max pulse length for black keys\n\nvoid toggleBlackKey(int keyNum, Adafruit_PWMServoDriver driver, int num) {\n if (keyPosition[keyNum-21] == 0) {\n driver.setPWM(num, 0, BLACKMIN);\n keyPosition[keyNum-21] = 1;\n } else {\n driver.setPWM(num, 0, BLACKMAX);\n keyPosition[keyNum-21] = 0;\n }\n}\n\nvoid toggleWhiteKey(int keyNum, Adafruit_PWMServoDriver driver, int num) {\n if (keyPosition[keyNum-21] == 0) {\n driver.setPWM(num, 0, SERVOMIN);\n keyPosition[keyNum-21] = 1;\n } else {\n driver.setPWM(num, 0, SERVOMAX);\n keyPosition[keyNum-21] = 0;\n }\n}\n\nvoid toggleKey(int key) {\n boolean pressed = false;\n \n // check if key is in driver 0 range\n if (key >= 21 && key <= 35) {\n for (int i = 0; i < 36; i++) {\n if (blackKeys[i] == key) {\n toggleBlackKey(key, pwm[0], key - 21);\n pressed = true;\n break;\n }\n }\n if (!pressed) {\n toggleWhiteKey(key, pwm[0], key - 21);\n }\n \n // check if key is in driver 1 range\n } else if (key >= 36 && key <= 50) {\n for (int i = 0; i < 36; i++) {\n if (blackKeys[i] == key) {\n toggleBlackKey(key, pwm[1], key - 36);\n pressed = true;\n break;\n }\n }\n if (!pressed) {\n toggleWhiteKey(key, pwm[1], key - 36);\n }\n\n // check if key is in driver 2 range\n } else if (key >= 51 && key <= 65) {\n for (int i = 0; i < 36; i++) {\n if (blackKeys[i] == key) {\n toggleBlackKey(key, pwm[2], key - 51);\n pressed = true;\n break;\n }\n }\n if (!pressed) {\n toggleWhiteKey(key, pwm[2], key - 51);\n }\n\n // check if key is in driver 3 range\n } else if (key >= 66 && key <= 80) {\n for (int i = 0; i < 36; i++) {\n if (blackKeys[i] == key) {\n toggleBlackKey(key, pwm[3], key - 66);\n pressed = true;\n break;\n }\n }\n if (!pressed) {\n toggleWhiteKey(key, pwm[3], key - 66);\n }\n\n // check if key is in driver 4 range\n } else if (key >= 81 && key <= 95) {\n for (int i = 0; i < 36; i++) {\n if (blackKeys[i] == key) {\n 
toggleBlackKey(key, pwm[4], key - 81);\n pressed = true;\n break;\n }\n }\n if (!pressed) {\n toggleWhiteKey(key, pwm[4], key - 81);\n }\n\n // check if key is in driver 5 range\n } else if (key >= 96 && key <= 108) {\n for (int i = 0; i < 36; i++) {\n if (blackKeys[i] == key) {\n toggleBlackKey(key, pwm[5], key - 96);\n pressed = true;\n break;\n }\n }\n if (!pressed) {\n toggleWhiteKey(key, pwm[5], key - 96);\n }\n }\n}\n\nvoid setup() {\n Serial1.begin(9600); // setup serial 1 rx and tx\n\n // setup servos\n for (int i = 0; i < 6; i++) {\n pwm[i].begin();\n pwm[i].setPWMFreq(60); // servos at 60 Hz update\n }\n delay(10);\n for (int i = 0; i < 88; i++) {\n keyPosition[i] = 1;\n }\n\n for (int i = 21; i <= 108; i++) {\n toggleKey(i);\n }\n}\n\nvoid loop() {\n if (Serial1.available()) {\n int key = Serial1.parseInt();\n toggleKey(key);\n }\n}\n" }, { "alpha_fraction": 0.5758354663848877, "alphanum_fraction": 0.5818337798118591, "avg_line_length": 25.522727966308594, "blob_id": "67601ef92a97cbf349a106c74fd111dab42f92e9", "content_id": "abe4363958a1bb94dfa793ab0eef6e5e0a156010", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1167, "license_type": "no_license", "max_line_length": 70, "num_lines": 44, "path": "/midi_to_txt.py", "repo_name": "courtneymcbeth/pianobot", "src_encoding": "UTF-8", "text": "import py_midicsv\nfrom command import Command\n\nfilename = \"Flight_of_the_Bumblebee.mid\" # MIDI file for song to play\n\n# Load the MIDI file and parse it into CSV format\ncsv_string = py_midicsv.midi_to_csv(filename)\n\n# file names\nbaseName = filename.lower().split(\".\")[0]\ncsvName = baseName + \".csv\"\ntxtName = baseName + \".txt\"\n\n# write csv file\nwith open(csvName, 'w') as file:\n for line in csv_string:\n file.write(line)\n\n# read commands from the csv file\ncmds = []\nwith open(csvName, \"r\") as file:\n eof = False\n while not eof:\n line = file.readline()\n lst = line.split(\",\")\n if \"Note_on_c\" in lst[2]:\n cmds.append(Command(int(lst[1]), int(\n lst[4]), not int(lst[5]) == 0))\n if \"End_of_file\" in lst[2]:\n eof = True\n\n# sort commands by time (earliest first)\ncmds.sort(key=lambda c: c.time)\n\n# write commands to a txt file\nwith open(txtName, \"w\") as file:\n for c in cmds:\n if c.cmd:\n file.write(str(c.time) + \",\" + str(c.key) + \",\" + \"on\\n\")\n else:\n file.write(str(c.time) + \",\" + str(c.key) + \",\" + \"off\\n\")\n file.write(\"eof\")\n\nprint(\"success\")\n" }, { "alpha_fraction": 0.4573170840740204, "alphanum_fraction": 0.47560974955558777, "avg_line_length": 17.22222137451172, "blob_id": "5c6b368a081d978ad4fa5b005ba60f747b5b0aa4", "content_id": "388e8bebfd548a23fe640bfa4dbbd416460385d4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 164, "license_type": "no_license", "max_line_length": 39, "num_lines": 9, "path": "/command.py", "repo_name": "courtneymcbeth/pianobot", "src_encoding": "UTF-8", "text": "class Command:\n time = 0\n key = 0\n cmd = 0\n\n def __init__(self, time, key, cmd):\n self.time = time\n self.key = key\n self.cmd = cmd\n" }, { "alpha_fraction": 0.44301289319992065, "alphanum_fraction": 0.46085232496261597, "avg_line_length": 36.37036895751953, "blob_id": "273cd09f5edbacd7f87fb36e1f45de21bc40ef66", "content_id": "3fc00b3c2e2dd06c9b0845de7a2d5355e48fd13d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1009, "license_type": "no_license", 
"max_line_length": 58, "num_lines": 27, "path": "/rxn_time.py", "repo_name": "courtneymcbeth/pianobot", "src_encoding": "UTF-8", "text": "filename = \"flight_of_the_bumblebee.txt\"\naddOn = 100 # time in ms to extend each note\ndelay = 0 # total delay - do not modify\n\nnewName = filename.split(\".\")[0] + \"_rxn.txt\"\n\nlastTime = 0\nlastKeys = []\nwith open(filename, \"r\") as origFile:\n with open(newName, \"w\") as newFile:\n line = origFile.readline()\n while not \"eof\" in line:\n lst = line.split(\",\")\n if \"off\" in lst[2] or int(lst[1]) in lastKeys:\n if not int(lst[0]) == lastTime:\n delay = delay + addOn\n lastTime = int(lst[0])\n lastKeys = [int(lst[1])]\n else:\n lastKeys.append(int(lst[1]))\n newFile.write(str(int(lst[0]) + delay) +\n \",\" + lst[1] + \",\" + lst[2])\n else:\n newFile.write(str(int(lst[0]) + delay) +\n \",\" + lst[1] + \",\" + lst[2])\n line = origFile.readline()\n newFile.write(\"eof\\n\")\n" }, { "alpha_fraction": 0.5692771077156067, "alphanum_fraction": 0.5903614163398743, "avg_line_length": 24.538461685180664, "blob_id": "e1ffcab1639e50a19794841e51f704a1ba4ff3d3", "content_id": "92cf5ce151005b3e8a8cb26ebca950deb38b760f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 664, "license_type": "no_license", "max_line_length": 52, "num_lines": 26, "path": "/transmit_serial.py", "repo_name": "courtneymcbeth/pianobot", "src_encoding": "UTF-8", "text": "import time\nimport serial\n\nfileName = \"the_entertainer_rxn.txt\"\n# file format: time(ms),key,on/off\n\n\ndef time_since(t):\n return time.time() - t # seconds\n\n\narduino = serial.Serial('/dev/tty.HC-05-DevB', 9600)\ntime.sleep(2) # give arduino time to reboot\nstart_time = time.time()\nwith open(fileName, \"r\") as file:\n line = file.readline()\n while not \"eof\" in line:\n lst = line.split(\",\")\n t = time_since(start_time)\n while t * 1000 < int(lst[0]):\n t = time_since(start_time)\n arduino.write((lst[1] + \"\\n\").encode())\n #print(lst[1] + \"\\n\")\n arduino.flush()\n line = file.readline()\narduino.close()\n" }, { "alpha_fraction": 0.7661870718002319, "alphanum_fraction": 0.7895683646202087, "avg_line_length": 78.42857360839844, "blob_id": "bc306c566930bd99b5514ce36e77b28e054ad6b1", "content_id": "df26c6bdc77d9d6ebe74f2a1c6cdfb14c8bf187f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 556, "license_type": "no_license", "max_line_length": 346, "num_lines": 7, "path": "/README.md", "repo_name": "courtneymcbeth/pianobot", "src_encoding": "UTF-8", "text": "# PianoBot\n\nPianoBot is a robot that plays the piano. The version that I have constructed is suitable for use on a standard piano with 88 keys. Music is converted from a MIDI file to commands that are transmitted from a computer to an Arduino (an Arduino Mega 2560 in my case; however, other models would also work) using serial communication over bluetooth.\n\n![Image of Piano](/photos/piano_design.jpg)\n\nFor a full description of the project, see [my write up on the Arduino Project Hub](https://create.arduino.cc/projecthub/cmcbeth5287/pianobot-80bae6).\n" } ]
6
daviesray-ornyx/bplanner
https://github.com/daviesray-ornyx/bplanner
bdb7c0f9650d3a1ea2ec3092d34ae9ab37158949
90acba3154936e69d03ba20b561519f337faa9e5
d78e40ed3513a9739f76e8aed8496226f563ea5c
refs/heads/master
2022-12-16T22:21:43.226193
2018-11-16T23:29:02
2018-11-16T23:29:02
147,232,729
0
0
null
2018-09-03T17:05:32
2018-11-16T23:29:33
2022-11-22T02:50:48
JavaScript
[ { "alpha_fraction": 0.6037991642951965, "alphanum_fraction": 0.6268656849861145, "avg_line_length": 37.78947448730469, "blob_id": "f0ec765afdb3a04a9fcf9848ddf861be73f57b20", "content_id": "bed77ad371f6d04cdb199f5aabb7ae2a9068646d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1474, "license_type": "no_license", "max_line_length": 117, "num_lines": 38, "path": "/bplanner/migrations/0005_auto_20180911_0234.py", "repo_name": "daviesray-ornyx/bplanner", "src_encoding": "UTF-8", "text": "# Generated by Django 2.0.7 on 2018-09-10 23:34\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('bplanner', '0004_auto_20180910_1227'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='businessplanfinancialassumptions',\n name='bad_debts',\n field=models.FloatField(blank=True, default=None, null=True, verbose_name='Bad Debts (% of revenue)'),\n ),\n migrations.AlterField(\n model_name='businessplanfinancialassumptions',\n name='inflation_rate',\n field=models.FloatField(blank=True, null=True, verbose_name='Inflation Rate (Per Annum)'),\n ),\n migrations.AlterField(\n model_name='businessplanfinancialassumptions',\n name='number_of_products_or_services',\n field=models.IntegerField(blank=True, null=True, verbose_name='Number of Products or Services Offered '),\n ),\n migrations.AlterField(\n model_name='businessplanfinancialassumptions',\n name='salary_growth_rate',\n field=models.FloatField(blank=True, null=True, verbose_name='Salary Growth Rate (Per Annum)'),\n ),\n migrations.AlterField(\n model_name='businessplanfinancialassumptions',\n name='tax_slabs_table',\n field=models.TextField(blank=True, max_length=500, null=True, verbose_name='Tax Slabs'),\n ),\n ]\n" }, { "alpha_fraction": 0.5776397585868835, "alphanum_fraction": 0.6438923478126526, "avg_line_length": 25.83333396911621, "blob_id": "9e3ce67a8e2b2323ef37c3e103501b874a96dc76", "content_id": "d33f33f8e1c8a7a7a8406b56807d861c8516b0f3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 483, "license_type": "no_license", "max_line_length": 100, "num_lines": 18, "path": "/bplanner/migrations/0018_auto_20181005_0328.py", "repo_name": "daviesray-ornyx/bplanner", "src_encoding": "UTF-8", "text": "# Generated by Django 2.0.7 on 2018-10-05 00:28\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('bplanner', '0017_auto_20181005_0316'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='businessplanfinancialassumptions',\n name='first_financial_year_month',\n field=models.IntegerField(default=1, verbose_name='Month Index of starting operations'),\n ),\n ]\n" }, { "alpha_fraction": 0.71385657787323, "alphanum_fraction": 0.7188814282417297, "avg_line_length": 63.00699234008789, "blob_id": "455f4c7be67c05c11e7f8eafb425c1efbc55dd3b", "content_id": "4352401e26efc0368950ef00638f1832a464070c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 18309, "license_type": "no_license", "max_line_length": 196, "num_lines": 286, "path": "/bplanner/models.py", "repo_name": "daviesray-ornyx/bplanner", "src_encoding": "UTF-8", "text": "from django.db import models\nfrom django.contrib.auth.models import User\nfrom django.utils import timezone\nfrom bplanner.choices import *\nfrom Project import settings\nimport os\n\nclass Profile(models.Model):\n user = 
models.ForeignKey(User,related_name='user_profile', verbose_name=\"User\", null=True, on_delete=models.DO_NOTHING)\n usage = models.FloatField(verbose_name='Space Used', null=True, blank=True, default=0)\n\n def get_usage(self):\n return round(self.usage/1000000, 4)\n\n def __str__(self):\n return self.user.username\n\n class Meta:\n verbose_name = \"User Profile\"\n verbose_name_plural = \"User Profiles\"\n\nclass BusinessPlanTitlePage(models.Model):\n company_name = models.CharField(verbose_name='Company, Business, or Project name', max_length=500, null=True, blank=True, default=\"\")\n tagline = models.CharField(verbose_name='Tagline', max_length=500, null=True, blank=True, default='')\n address = models.CharField(verbose_name='Business address', max_length=500, null=True, blank=True, default='')\n phone_number = models.CharField(verbose_name='Phone Number', max_length=15, null=True, blank=True, default='')\n email = models.CharField(verbose_name='Email', max_length=500, null=True, blank=True, default='')\n website = models.CharField(verbose_name='Website', max_length=500, null=True, blank=True, default='')\n presented_to = models.CharField(verbose_name='Presented to', max_length=500, null=True, blank=True, default='')\n logo = models.TextField(verbose_name='Logo', null=True, blank=True, default='data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAMgAAADICAMAAACahl6sAAAAM1BMVEX////Oz9D+/v6ZmZnU1db5+fnY2dr4+Pjs7OzR0tPn6Oj19fXk5OTw8PDX2Nnq6+ve3+Ds93GrAAAHbUlEQVR4nO2d65qrKgyGp2xROSn3f7VL1EDw0Mo0OLSb76/6lBdICBDoz8/3iH2BZo7/vkBsBvnjHkGgClKaKkhpqiClqYKUpgpSmipIaaogpamClKYKUpoqSGmqIKWpgpSmClKaKkhpqiClqYKUpgpSmipIaaogSeJdxzP/xA0gjCup9ag4y/kruUEmirF9zGr7nM2SF4R3vRUPL2FNl6tZMoIwbmzz2EjoTF0sF4gzjAGVH7EMWcwlCwgyjFlNq/WAWTKYSwaQrWEM2kw/wowdcpoLNQg3UuLyNrb3dc9H26BnDam5kIIw1ePGEE0ru/iNTrbY/tvJXGh+mhCEKWNwl5oMQx29p/SAWawx3dFrqSIDUUaj8q2GcayNuTRSmvd/nwiES4spkGGcvB+Zi7Dj28ZCA8LCmDFRbA3jWEojc2n7N0tABKI0UAw2oZuYMLpcg38iGhA5161oX3aprXhvW0HSJDQgi523h17qlcxQHMhjkEYlWS3rzNKYhYE4/9NfDz1wXFkaiBsW7KXheoorkccuEcSx6BfNMkUBOAgoFsSZyznLZBhREDCPjMWCuLLJoy42hcdowjVFZNK0hYPMph+zbAxDtHoaPln5IIu5gEfeGIYbPucn/CNAwFy2hjHYEdrqY0BcMaWM5vBWoiDgk0CwFsNA+kgQbxhIHwiCDAPp00BEZBhInwZizz7+NBB59nEFiVRBIlUQpwoSqYJEqiBOFSRSBYlUQZwqSKQCQD5jOeg1CJ/X4wsBkb8GYWZZsUvZ6DoU7dZbKgjzS3ZHk/kkEe3qmvY5xSEIMxpWHu2vNruwiEBY/9JMdiDKYzTHqQVJIksY4Hqz4/ECpPOL8o2WBCkphLkoxj5FiUB4SIOymiQdhTSpxrRPTAWBgKt6uP1TkkwU8jQnc+6IPUhwVSTGsYo6X4v1ZygrCHJVQhMk04DoM+gYzq/ZgSi0k32eP/QL5chp5OORqTiQ4Koe+1X595QnyzTKMPUgCLB9eyTfKle6rNoNkBq7KjIb98qXwByMevVQGVwVUs5M7G12A72rQsqaG8/6rQMjdlVImU8rbBwYtatCyn5+BKcykbsqpBsOwqwOLIerQrrlaJI70ZMX47bDYjz3Eat66q04VZDSlADizlVEym7AKUoAmUbpSO+vRVEqAaTbLCeKPNHfL5XSIhuQ5lNBvqdFjEVHV5rWEq1I0SjF/XLT63WC0YyK7LwajRLHESbXWXfWI92/USII2MlQVnP8VJCnIMxIbSfpo7GfGfdMz3Nerpzit7iR7lsrEyf39CBM+iOGB4dDx/WIe+M2DeXghMejLpzpa9L2G8hBTButAQ3RJg5Ha13N+LO6wACyWWtN2eolBmH9bi3ehlfj2ECMGxCut+tg4vpeFjGIOViS01AYJo/XHgFE7r8Vl9uEFkQd7Y2I8dnTADIebdw1V0NsUhAGXUfIaboyNnFh/NOH7aenYb1+AeFgH8M4fewbz17sXKQg0LGaxXUqWDBdOpeCosv547A7vYBA0ZdpDjNQDRfdMCUIVLmA7UIo+jCXDUwA6phZDAIN4vsS9LSLTpgSpIMraeAhRGazyUK5w7AxYmOHFvB+irW4Fm4FgZ4VNqLBvDXG9NF/j0HAgEKx174mrvUtSpAR1f8ivraCG0ugyr03jltkfXMIkxyohWtHywlBfIgfKhW6kzsgvm8vDAIdCQ2fHZDfDqL3IHoPEtoLg/A9CBv+GKRFz+QOBM30j0A0Moi2IJAxFUQWBPK8a70AKalrUdkIB2O/tFrzPgjrtZRuxsf65RmaKGH32z/3WuDf9u732s7j+yDTQCfEbBjmsS2qQmMHxCsn4wiEN6E5gfyukX0prAOBoobusdr6wwXy/PnI7hvMc+5CmttAfNAIE5AOB4LQeXxI6aOSJWgc0LtOYFMXFzQTQfh+gY4LAPG/vd70wuIwvvdB/vKhjMN4wFqDY6iETGE8TDFE2CZZanIGAdue7xVQYxuX3M+cGuumXSFRZXkM9iQGqTqlYfLSXjL1xI2eQURX4mEtvxcms00Trp2Cbh/m5KLBd7gtIGFGPz0Nk+KrV9+kgEhx
ng67gIAHjuRtnx3nCMKc/fDx5eSVNzZ6DkB8BIw5grF2cV7ddhWF70myLAddAUFzbSgK7uMdXoDz1wd6/8o31dAkLJum7FjJZtZh/2p4eC3Uu9jeRMf90srQs34L8sNwBuFk9AnLv0nb0/0sow6E7tdhnYJF7P39mO5akXkRe/oA6t/Ez9dFbJV2IWWehAHGnU4K4h7O/+cLrlztn59+fKo/zHzYxyzv6A9BIGa0F4e857oXhMmwYeLdG02e470gXSuaVvdThBLuPKXpWTeDLK0gnLyXJcqfvRfkYAeE4liS060gB9EUFcfNINsgJyUGeaGbvVZ0V/bQE94mffc4Mg3bapRa6nGKX0o/0fNK/FcxyAv9H5Mzy1YFKU0VpDRVkNJUQUpTBSlNFaQ0VZDSVEFKUwUpTRWkNFWQ0lRBSlMFKU0VpDR9G8gXaN5vYV+gP+4RlPoHaRlbGlr/X7oAAAAASUVORK5CYII=')\n\n size = models.FloatField(verbose_name='Size', null=True, blank=True, default=0) # Store size of page\n bplan_size = models.FloatField(verbose_name='Size', null=True, blank=True, default=0) # Store size of the entire business plan\n\n date_created = models.DateTimeField(verbose_name='Date Created', blank=True, null=True)\n date_modified = models.DateTimeField(verbose_name='Date Modified', blank=True, null=True)\n owner = models.ForeignKey(User, verbose_name=\"Business Plan Owner\", null=True, blank=True, on_delete=models.DO_NOTHING) # Ensure this is changed to False on deployment!!\n\n def get_static_root(self):\n print(settings.MEDIA_ROOT)\n print(self.logo.url)\n print(os.path.join(settings.MEDIA_ROOT, self.logo.name))\n return os.path.join(settings.MEDIA_ROOT, self.logo.name)\n\n def save(self, *args, **kwargs):\n ''' On save, update timestamps '''\n if not self.id:\n self.created = timezone.now()\n self.modified = timezone.now()\n return super(BusinessPlanTitlePage, self).save(*args, **kwargs)\n\n def __str__(self):\n return self.company_name if self.company_name is not None else 'Missing name Business Plan'\n\n class Meta:\n verbose_name = \"Business Plan Title Page\"\n verbose_name_plural = \"Business Plans Title Page\"\n ordering = ['-date_modified', '-date_created']\n\nclass BusinessPlanMainContent(models.Model):\n title_page = models.ForeignKey(BusinessPlanTitlePage, verbose_name=\"Business Plan Title Page\", null=True, on_delete=models.CASCADE)\n mission_vision = models.TextField(verbose_name='Mission and Vision', null=True, blank=True, default='')\n executive_summary = models.TextField(verbose_name='Executive Summary', null=True, blank=True, default='')\n company_description = models.TextField(verbose_name='Company Description', null=True, blank=True, default='')\n key_success_factors = models.TextField(verbose_name='Key Success Factors', null=True, blank=True, default='')\n objectives = models.TextField(verbose_name='Objectives', null=True, blank=True, default='')\n industry_analysis = models.TextField(verbose_name='Industry Analysis', null=True, blank=True, default='')\n tam_sam_som_analysis = models.TextField(verbose_name='TAM-SAM-SOM Analysis', null=True, blank=True, default='')\n swot_analysis = models.TextField(verbose_name='SWOT Analysis', null=True, blank=True, default='')\n insights = models.TextField(verbose_name='Insights', null=True, blank=True, default='')\n marketing_plan = models.TextField(verbose_name='Marketing Plan', null=True, blank=True, default='')\n ownership_and_management_plan = models.TextField(verbose_name='Ownership and Management Plan', null=True, blank=True, default='')\n milestones = models.TextField(verbose_name='Milestones', null=True, blank=True, default='')\n\n size = models.FloatField(verbose_name='Size', null=True, blank=True, default=0) # Store size of page\n\n date_created = models.DateTimeField(verbose_name='Date Created', blank=True, null=True)\n date_modified = models.DateTimeField(verbose_name='Date Modified', blank=True, null=True)\n owner = 
models.ForeignKey(User, verbose_name=\"Business Plan Owner\", null=True, blank=True, on_delete=models.DO_NOTHING) # Ensure this is changed to False on deployment!!\n\n def save(self, *args, **kwargs):\n ''' On save, update timestamps '''\n if not self.id:\n self.created = timezone.now()\n self.modified = timezone.now()\n return super(BusinessPlanMainContent, self).save(*args, **kwargs)\n\n def __str__(self):\n return self.title_page.company_name if self.title_page is not None else 'Missing name Business Plan'\n\n class Meta:\n verbose_name = \"Business Plan Main Content\"\n verbose_name_plural = \"Business Plans Main Content\"\n ordering = ['-date_modified', '-date_created']\n\nclass BusinessPlanFinancialAssumptions(models.Model):\n title_page = models.ForeignKey(BusinessPlanTitlePage, verbose_name=\"Business Plan Title Page\", null=True, on_delete=models.CASCADE)\n currency = models.IntegerField(choices=CURRENCY_CHOICES, verbose_name='Currency', default=0)\n first_financial_year = models.IntegerField(verbose_name='First Financial Year', null=True, blank=True)\n first_financial_year_month = models.IntegerField(verbose_name='Month Index of starting operations', null=False, blank=False, default=1)\n projection_years = models.IntegerField(verbose_name='Period of Projections in Years', null=True, blank=True)\n offerings_products_or_services = models.IntegerField(choices=OFFERING_CHOICES, verbose_name='Products/Services', default=0)\n number_of_products_or_services = models.IntegerField(verbose_name='Number of Products or Services Offered ', null=True, blank=True)\n product_services_table = models.TextField(verbose_name='Products or Services Offered Table', null=True, blank=True, default='')\n count_of_months_in_financial_year = models.IntegerField(verbose_name='Number of Months in Projection Year', null=True, blank=True, default=12)\n inflation_rate = models.IntegerField(verbose_name='Inflation Rate (Per Annum)', null=True, blank=True)\n salary_growth_rate = models.IntegerField(verbose_name='Salary Growth Rate (Per Annum)', null=True, blank=True)\n amortization_period = models.IntegerField(verbose_name='Startup Cost Amortization Period (in Years) ', null=True, blank=True)\n trade_receivables = models.IntegerField(verbose_name='Trade Receivables (period in months)', null=True, blank=True)\n trade_payables = models.IntegerField(verbose_name='Trade Payables (period in months)', null=True, blank=True)\n other_expenses_payables = models.IntegerField(verbose_name='Other Expenses Payable', null=True, blank=True)\n bad_debts = models.FloatField(verbose_name='Bad Debts (% of revenue)', null=True, blank=True, default=None)\n taxation_system = models.IntegerField(choices=TAXATION_SYSTEM_CHOICES, verbose_name='Taxation System', default=0)\n corporate_tax_rate = models.IntegerField(verbose_name='Corporate Tax Rate ', null=True, blank=True, default=20)\n tax_slabs_table = models.TextField(verbose_name='Tax Slabs', null=True, blank=True)\n\n\n size = models.FloatField(verbose_name='Size', null=True, blank=True, default=0) # Store size of page\n\n date_created = models.DateTimeField(verbose_name='Date Created', blank=True, null=True)\n date_modified = models.DateTimeField(verbose_name='Date Modified', blank=True, null=True)\n owner = models.ForeignKey(User, verbose_name=\"Business Plan Owner\", null=True, on_delete=models.DO_NOTHING) # Ensure this is changed to False on deployment!!\n\n def save(self, *args, **kwargs):\n ''' On save, update timestamps '''\n if not self.id:\n self.created = timezone.now()\n 
\n        self.date_modified = timezone.now()\n        return super(BusinessPlanFinancialAssumptions, self).save(*args, **kwargs)\n\n    def __str__(self):\n        return self.title_page.company_name if self.title_page is not None else 'Missing name Business Plan'\n\n    class Meta:\n        verbose_name = \"Business Plan Financial Assumptions\"\n        verbose_name_plural = \"Business Plans Financial Assumptions\"\n        ordering = ['-date_modified', '-date_created']\n\nclass BusinessPlanFinancialDataInput(models.Model):\n    title_page = models.ForeignKey(BusinessPlanTitlePage, verbose_name=\"Business Plan Title Page\", null=True, on_delete=models.CASCADE)\n    financial_input = models.TextField(verbose_name='Financial Input', null=True, blank=True, default='')\n\n    size = models.FloatField(verbose_name='Size', null=True, blank=True, default=0) # Store size of page\n\n    date_created = models.DateTimeField(verbose_name='Date Created', blank=True, null=True)\n    date_modified = models.DateTimeField(verbose_name='Date Modified', blank=True, null=True)\n    owner = models.ForeignKey(User, verbose_name=\"Business Plan Owner\", null=True, on_delete=models.DO_NOTHING) # Ensure this is changed to False on deployment!!\n\n    def save(self, *args, **kwargs):\n        ''' On save, update timestamps '''\n        if not self.id:\n            self.date_created = timezone.now()\n        self.date_modified = timezone.now()\n        return super(BusinessPlanFinancialDataInput, self).save(*args, **kwargs)\n\n    def __str__(self):\n        return self.title_page.company_name if self.title_page is not None else 'Missing name Business Plan'\n\n    class Meta:\n        verbose_name = \"Business Plan Financial Data Input\"\n        verbose_name_plural = \"Business Plans Financial Data Input\"\n        ordering = ['-date_modified', '-date_created']\n\nclass BusinessPlanSettings(models.Model):\n    title_page = models.ForeignKey(BusinessPlanTitlePage, verbose_name=\"Business Plan Title Page\", null=True, on_delete=models.CASCADE)\n    step_monitor = models.TextField(verbose_name='Step Monitor', null=True, blank=True, default='')\n\n    calendar_months = models.TextField(verbose_name='Calendar Months', null=True, blank=True, default='')\n    projection_months_list = models.TextField(verbose_name='Projection Months List', null=True, blank=True, default='')\n    projection_years = models.IntegerField(verbose_name='Number of projection years', null=True, blank=True, default=0)\n    first_financial_year = models.IntegerField(verbose_name='First Financial Year', null=True, blank=True, default=2018)\n    last_financial_year = models.IntegerField(verbose_name='Last Financial Year', null=True, blank=True)\n    count_of_months_in_financial_year = models.IntegerField(verbose_name='Count of months in a financial year', null=True, blank=True, default=12)\n    projection_years_list = models.TextField(verbose_name='Projection years list', null=True, blank=True, default='[]')\n    projection_years_list_display = models.TextField(verbose_name='Projection years list for Display', null=True, blank=True, default='[]')\n    product_count = models.IntegerField(verbose_name='Products/Services count', null=True, blank=True)\n    products = models.TextField(verbose_name='Products', null=True, blank=True, default='')\n    theme = models.TextField(verbose_name='Theme', null=True, blank=True, default='')\n    cost_appropriation_methods = models.TextField(verbose_name='Cost Appropriation Methods', null=True, blank=True, default='')\n    operating_cost_list = models.TextField(verbose_name='Operating Costs List', null=True, blank=True, default='')\n    employees_list = models.TextField(verbose_name='Employees List', null=True, blank=True, default='')
\n    capital_sources_list = models.TextField(verbose_name='Capital Sources List', null=True, blank=True, default='')\n    tangible_assets_list = models.TextField(verbose_name='Tangible Assets List', null=True, blank=True, default='')\n    intangible_assets_list = models.TextField(verbose_name='Intangible Assets List', null=True, blank=True, default='')\n    deposit_item_list = models.TextField(verbose_name='Deposit Items List', null=True, blank=True, default='')\n    startup_cost_item_list = models.TextField(verbose_name='Startup Cost Items List', null=True, blank=True, default='')\n    total_assets = models.TextField(verbose_name='Total Assets', null=True, blank=True, default='')\n    total_liabilities = models.TextField(verbose_name='Total Liabilities', null=True, blank=True, default='')\n    tangible_assets_balance_total = models.TextField(verbose_name='Tangible Assets Balance Total', null=True, blank=True, default='')\n    intangible_assets_balance_total = models.TextField(verbose_name='Intangible Assets Balance Total', null=True, blank=True, default='')\n    cashFlow_changes_during_the_year_per_month = models.TextField(verbose_name='Cash Flow Changes During The Year Per Month', null=True, blank=True, default='')\n    closing_cash_balance_per_month = models.TextField(verbose_name='Closing Cash Balance Per Month', null=True, blank=True, default='')\n    revenue_totals_per_year = models.TextField(verbose_name='Revenue Totals Per Year', null=True, blank=True, default='')\n    direct_cost_totals_per_year = models.TextField(verbose_name='Direct Cost Totals Per Year', null=True, blank=True, default='')\n    gross_profit = models.TextField(verbose_name='Gross Profit', null=True, blank=True, default='')\n    operating_cost_totals_per_year = models.TextField(verbose_name='Operating Cost Totals Per Year', null=True, blank=True, default='')\n    eat = models.TextField(verbose_name='EAT', null=True, blank=True, default='')\n    net_margin_per_month = models.TextField(verbose_name='Net Margin Per Month', null=True, blank=True, default='')\n    month_list_initiated = models.BooleanField(verbose_name='Month List Initiated', null=False, blank=False, default=False)\n    year_list_initiated = models.BooleanField(verbose_name='Year List Initiated', null=False, blank=False, default=False)\n\n    def __str__(self):\n        return self.title_page.company_name + ' settings' if self.title_page is not None else 'Unnamed Business Plan settings'\n\n    class Meta:\n        verbose_name = \"Business Plan Settings\"\n        verbose_name_plural = \"Business Plans Settings\"\n        ordering = ['-id']\n\nclass BusinessPlanSample(models.Model):\n    title_page = models.ForeignKey(BusinessPlanTitlePage, verbose_name=\"Business Plan Title Page\", null=True, on_delete=models.CASCADE)\n    display_name = models.CharField(verbose_name='Display Name', max_length=500, null=True, blank=True)\n    business_types = models.IntegerField(choices=BUSINESS_TYPES, verbose_name='Business Type', default=1)\n\n    def __str__(self):\n        return self.display_name\n\n    class Meta:\n        verbose_name = \"Business Plan Sample\"\n        verbose_name_plural = \"Business Plans Samples\"\n\nclass HelpSection(models.Model):\n    ref_id = models.CharField(max_length=250, verbose_name='Ref Id', null=True, blank=True, default='')\n    title = models.CharField(max_length=250, verbose_name='Help Section Title', null=True, blank=True, default='')\n    description = models.TextField(verbose_name='Description', null=True, blank=True, default='')\n    # links_and_sources = models.TextField(verbose_name='Links and Sources', null=True, blank=True, default='')\n\n    def 
__str__(self):\n return self.title\n\n class Meta:\n verbose_name = 'Help Section'\n verbose_name_plural = 'Help Sections'\n ordering = ['ref_id']\n\nclass HelpSubSection(models.Model):\n help_section = models.ForeignKey(HelpSection, related_name='rel_help_sections', verbose_name='Help Section', null=True, blank=True, on_delete=models.CASCADE)\n title = models.CharField(max_length=250, verbose_name='Help Sub-Section Title', null=True, blank=True, default='')\n instruction = models.TextField(verbose_name='Instruction', null=True, blank=True, default='')\n\n\n def __str__(self):\n return self.title\n\n class Meta:\n verbose_name = 'Help Sub-Section'\n verbose_name_plural = 'Help Sub-Sections'\n\nclass HelpSubSectionExample(models.Model):\n # Examples that can be classified for NGO and Business categories\n help_sub_section = models.ForeignKey(HelpSubSection, related_name='rel_help_sub_section_examples', verbose_name='Help Sub-Section', null=True, blank=True, on_delete=models.CASCADE)\n title = models.CharField(max_length=250, verbose_name='Example Title', null=True, blank=True, default='')\n example = models.TextField(verbose_name='Example', null=True, blank=True, default='')\n\n def __str__(self):\n return self.title\n\n class Meta:\n verbose_name = 'Help Sub-Section Example'\n verbose_name_plural = 'Help Sub-Section Examples'\n\nclass Currency(models.Model):\n # Examples that can be classified for NGO and Business categories\n code = models.CharField(max_length=150, verbose_name='Codes', null=True, blank=True, default='')\n full_name = models.CharField(max_length=250, null=True, blank=True, default='')\n rate_to_dollar = models.FloatField(verbose_name='Rate to Dollar', null=True, blank=True, default=0.00)\n\n def __str__(self):\n return self.code\n\n class Meta:\n verbose_name = 'Currency'\n verbose_name_plural = 'Currencies'\n\nclass Month(models.Model):\n # Examples that can be classified for NGO and Business categories\n code = models.CharField(max_length=150, verbose_name='Codes', null=True, blank=True, default='')\n full_name = models.CharField(max_length=250, null=True, blank=True, default='')\n order = models.FloatField(verbose_name='Month order', null=True, blank=True, default=0)\n\n def __str__(self):\n return self.code\n\n class Meta:\n verbose_name = 'Month'\n verbose_name_plural = 'Months'\n\n\n\n" }, { "alpha_fraction": 0.5581395626068115, "alphanum_fraction": 0.5639534592628479, "avg_line_length": 30.18181800842285, "blob_id": "a3bb39d10e60f8da6472d023621b5eb65e60ea18", "content_id": "bf7d10f32c56b90ed8fb777433afd4b770d320e9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "HTML", "length_bytes": 344, "license_type": "no_license", "max_line_length": 109, "num_lines": 11, "path": "/templates/sign-in.html", "repo_name": "daviesray-ornyx/bplanner", "src_encoding": "UTF-8", "text": "{% extends \"base.html\" %}\n{% load static from staticfiles %}\n\n{% block page_content %}\n <!-- page content -->\n <div class=\"right_col\" role=\"main\" style=\"background-color: #ecedef;padding-top: 0px;margin-top: -5px;\">\n <div class=\"center-div\">\n {% include \"sign-in-form.html\" %}\n </div>\n </div>\n{% endblock %}\n\n" }, { "alpha_fraction": 0.5043478012084961, "alphanum_fraction": 0.5942028760910034, "avg_line_length": 19.294116973876953, "blob_id": "8506d4b70a7b8813858a4dbb5cb9039ceb08a5d0", "content_id": "f44cd06385bfc8e082a73d16d276eaef1bc9bfc1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 
345, "license_type": "no_license", "max_line_length": 48, "num_lines": 17, "path": "/bplanner/migrations-bak/0012_remove_helpsection_links_and_sources.py", "repo_name": "daviesray-ornyx/bplanner", "src_encoding": "UTF-8", "text": "# Generated by Django 2.0.7 on 2018-07-24 09:31\n\nfrom django.db import migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('bplanner', '0011_auto_20180724_0947'),\n ]\n\n operations = [\n migrations.RemoveField(\n model_name='helpsection',\n name='links_and_sources',\n ),\n ]\n" }, { "alpha_fraction": 0.6148001551628113, "alphanum_fraction": 0.6220680475234985, "avg_line_length": 64.8043441772461, "blob_id": "1f98fa63c368a1d19ebfe20bd8bb6980ba0e93d8", "content_id": "22f1d04375a62896e2c23e00c806beea6b6bda4d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3027, "license_type": "no_license", "max_line_length": 169, "num_lines": 46, "path": "/bplanner/migrations-bak/0013_businessplan.py", "repo_name": "daviesray-ornyx/bplanner", "src_encoding": "UTF-8", "text": "# Generated by Django 2.0.7 on 2018-07-25 09:01\n\nfrom django.conf import settings\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ('bplanner', '0012_remove_helpsection_links_and_sources'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='BusinessPlan',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('name', models.CharField(max_length=500, verbose_name='Name')),\n ('title', models.TextField(blank=True, default='', null=True, verbose_name='Title Page')),\n ('mission_vision', models.TextField(blank=True, default='', null=True, verbose_name='Mission and Page')),\n ('executive_summary', models.TextField(blank=True, default='', null=True, verbose_name='Executive Summary')),\n ('company_description', models.TextField(blank=True, default='', null=True, verbose_name='Company Description')),\n ('key_success_factors', models.TextField(blank=True, default='', null=True, verbose_name='Key Success Factors')),\n ('objectives', models.TextField(blank=True, default='', null=True, verbose_name='Objectives')),\n ('industry_analysis', models.TextField(blank=True, default='', null=True, verbose_name='Industry Analysis')),\n ('tam_sam_som_analysis', models.TextField(blank=True, default='', null=True, verbose_name='TAM-SAM-SOM analysis')),\n ('swot_analysis', models.TextField(blank=True, default='', null=True, verbose_name='SWOT Analysis')),\n ('insights', models.TextField(blank=True, default='', null=True, verbose_name='Insights')),\n ('marketing_plan', models.TextField(blank=True, default='', null=True, verbose_name='Marketing Plan')),\n ('ownership_and_management_plan', models.TextField(blank=True, default='', null=True, verbose_name='Ownership and Management Plan')),\n ('milestones', models.TextField(blank=True, default='', null=True, verbose_name='Milestones')),\n ('finance', models.TextField(blank=True, default='', null=True, verbose_name='Finance')),\n ('references', models.TextField(blank=True, default='', null=True, verbose_name='references')),\n ('date_created', models.DateTimeField(editable=False)),\n ('date_modified', models.DateTimeField(editable=False)),\n ('owner', models.ForeignKey(null=True, on_delete=django.db.models.deletion.DO_NOTHING, to=settings.AUTH_USER_MODEL, verbose_name='Business Plan 
Owner')),\n ],\n options={\n 'verbose_name': 'Business Plan',\n 'verbose_name_plural': 'Business Plans',\n 'ordering': ['-date_modified', '-date_created'],\n },\n ),\n ]\n" }, { "alpha_fraction": 0.5596755743026733, "alphanum_fraction": 0.5955967307090759, "avg_line_length": 30.962963104248047, "blob_id": "4e62d7557e508c784b4f2156d6c74692ea3e3200", "content_id": "44bfb102d3f6e536397750c26445b6fcb53922e5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 863, "license_type": "no_license", "max_line_length": 117, "num_lines": 27, "path": "/bplanner/migrations-bak/0006_auto_20180723_1813.py", "repo_name": "daviesray-ornyx/bplanner", "src_encoding": "UTF-8", "text": "# Generated by Django 2.0.7 on 2018-07-23 17:13\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('bplanner', '0005_auto_20180723_1805'),\n ]\n\n operations = [\n migrations.AlterModelOptions(\n name='helpsection',\n options={'ordering': ['ref_id'], 'verbose_name': 'Help Section', 'verbose_name_plural': 'Help Sections'},\n ),\n migrations.AddField(\n model_name='helpsubsectionexample',\n name='example',\n field=models.TextField(blank=True, default='', null=True, verbose_name='Example'),\n ),\n migrations.AddField(\n model_name='helpsubsectionexample',\n name='instruction',\n field=models.TextField(blank=True, default='', null=True, verbose_name='Instruction'),\n ),\n ]\n" }, { "alpha_fraction": 0.557603657245636, "alphanum_fraction": 0.581413209438324, "avg_line_length": 33.26315689086914, "blob_id": "716a5b22119a8243f10be73d57dcb6f5c1d47774", "content_id": "0ad9d1bf4c0f4c414219835910c19c6717d8ba00", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1302, "license_type": "no_license", "max_line_length": 107, "num_lines": 38, "path": "/bplanner/migrations-bak/0023_auto_20180903_0003.py", "repo_name": "daviesray-ornyx/bplanner", "src_encoding": "UTF-8", "text": "# Generated by Django 2.0.7 on 2018-09-02 21:03\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('bplanner', '0022_auto_20180902_1955'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='businessplan',\n name='rpt_amortization',\n field=models.TextField(blank=True, default='', null=True, verbose_name='Monthly Amortization'),\n ),\n migrations.AddField(\n model_name='businessplan',\n name='rpt_balance_sheet',\n field=models.TextField(blank=True, default='', null=True, verbose_name='Balance Sheet'),\n ),\n migrations.AddField(\n model_name='businessplan',\n name='rpt_cash_flow',\n field=models.TextField(blank=True, default='', null=True, verbose_name='Cash Flow'),\n ),\n migrations.AddField(\n model_name='businessplan',\n name='rpt_dashboard',\n field=models.TextField(blank=True, default='', null=True, verbose_name='Dashboard'),\n ),\n migrations.AddField(\n model_name='businessplan',\n name='rpt_pnl',\n field=models.TextField(blank=True, default='', null=True, verbose_name='Monthly P&L'),\n ),\n ]\n" }, { "alpha_fraction": 0.5253797769546509, "alphanum_fraction": 0.5324194431304932, "avg_line_length": 29.670454025268555, "blob_id": "46b6c24bc1ecb1765d28fcbce1808bdeb1875059", "content_id": "099abc73c9c059520b02f897c5062a84ccc8da4a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2699, "license_type": "no_license", "max_line_length": 58, "num_lines": 88, "path": 
"/bplanner/migrations-bak/0014_auto_20180725_1421.py", "repo_name": "daviesray-ornyx/bplanner", "src_encoding": "UTF-8", "text": "# Generated by Django 2.0.7 on 2018-07-25 13:21\n\nfrom django.db import migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('bplanner', '0013_businessplan'),\n ]\n\n operations = [\n migrations.RenameField(\n model_name='businessplan',\n old_name='company_description',\n new_name='company_description_page',\n ),\n migrations.RenameField(\n model_name='businessplan',\n old_name='executive_summary',\n new_name='executive_summary_page',\n ),\n migrations.RenameField(\n model_name='businessplan',\n old_name='finance',\n new_name='finance_page',\n ),\n migrations.RenameField(\n model_name='businessplan',\n old_name='industry_analysis',\n new_name='industry_analysis_page',\n ),\n migrations.RenameField(\n model_name='businessplan',\n old_name='insights',\n new_name='insights_page',\n ),\n migrations.RenameField(\n model_name='businessplan',\n old_name='key_success_factors',\n new_name='key_success_factors_page',\n ),\n migrations.RenameField(\n model_name='businessplan',\n old_name='marketing_plan',\n new_name='marketing_plan_page',\n ),\n migrations.RenameField(\n model_name='businessplan',\n old_name='milestones',\n new_name='milestones_page',\n ),\n migrations.RenameField(\n model_name='businessplan',\n old_name='mission_vision',\n new_name='mission_vision_page',\n ),\n migrations.RenameField(\n model_name='businessplan',\n old_name='objectives',\n new_name='objectives_page',\n ),\n migrations.RenameField(\n model_name='businessplan',\n old_name='ownership_and_management_plan',\n new_name='ownership_and_management_plan_page',\n ),\n migrations.RenameField(\n model_name='businessplan',\n old_name='references',\n new_name='references_page',\n ),\n migrations.RenameField(\n model_name='businessplan',\n old_name='swot_analysis',\n new_name='swot_analysis_page',\n ),\n migrations.RenameField(\n model_name='businessplan',\n old_name='tam_sam_som_analysis',\n new_name='tam_sam_som_analysis_page',\n ),\n migrations.RenameField(\n model_name='businessplan',\n old_name='title',\n new_name='title_page',\n ),\n ]\n" }, { "alpha_fraction": 0.6830520629882812, "alphanum_fraction": 0.6830520629882812, "avg_line_length": 36.86111068725586, "blob_id": "178ce3079d92d648b00bac190825dd89c88d9d5b", "content_id": "04d489890807072a67a970279a66e638fb91fd48", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1363, "license_type": "no_license", "max_line_length": 102, "num_lines": 36, "path": "/bplanner/forms.py", "repo_name": "daviesray-ornyx/bplanner", "src_encoding": "UTF-8", "text": "__author__ = 'Davies Ray'\nfrom django.forms import ModelForm\nfrom django_summernote.widgets import SummernoteWidget, SummernoteInplaceWidget\nfrom .models import (BusinessPlanTitlePage, BusinessPlanMainContent, BusinessPlanFinancialAssumptions,\n BusinessPlanFinancialDataInput, BusinessPlanSettings)\n\nclass BusinessPlanTitlePageForm(ModelForm):\n class Meta:\n model = BusinessPlanTitlePage\n fields = '__all__'\n exclude = ('date_created', 'date_modified', 'owner', 'title_page')\n\nclass BusinessPlanMainContentForm(ModelForm):\n class Meta:\n model = BusinessPlanMainContent\n fields = '__all__'\n exclude = ('date_created', 'date_modified', 'owner', 'title_page')\n\n\nclass BusinessPlanFinancialAssumptionsForm(ModelForm):\n class Meta:\n model = BusinessPlanFinancialAssumptions\n fields = '__all__'\n exclude = 
('date_created', 'date_modified', 'owner', 'title_page')\n\nclass BusinessPlanFinancialDataInputForm(ModelForm):\n class Meta:\n model = BusinessPlanFinancialDataInput\n fields = '__all__'\n exclude = ('date_created', 'date_modified', 'owner', 'title_page')\n\nclass BusinessPlanSettingsForm(ModelForm):\n class Meta:\n model = BusinessPlanSettings\n fields = '__all__'\n exclude = ('date_created', 'date_modified', 'owner', 'title_page')\n" }, { "alpha_fraction": 0.5724331736564636, "alphanum_fraction": 0.6160337328910828, "avg_line_length": 29.913043975830078, "blob_id": "288e286c80695bcd89d669f844f235434cd6b7e3", "content_id": "ab9cb722644ec7bf2d874a4d09abdc68605f83e8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 711, "license_type": "no_license", "max_line_length": 145, "num_lines": 23, "path": "/bplanner/migrations-bak/0003_auto_20180723_1751.py", "repo_name": "daviesray-ornyx/bplanner", "src_encoding": "UTF-8", "text": "# Generated by Django 2.0.7 on 2018-07-23 16:51\n\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('bplanner', '0002_auto_20180723_1730'),\n ]\n\n operations = [\n migrations.AlterModelOptions(\n name='help',\n options={'ordering': ['ref_id'], 'verbose_name': 'Help Content', 'verbose_name_plural': 'Help Contents'},\n ),\n migrations.AddField(\n model_name='helpsubsection',\n name='help',\n field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='bplanner.Help', verbose_name='Help'),\n ),\n ]\n" }, { "alpha_fraction": 0.4675324559211731, "alphanum_fraction": 0.4967532455921173, "avg_line_length": 13.045454978942871, "blob_id": "8407cd81736398c3d781e91fedb713507a18bb45", "content_id": "40e67290e6a3c0041742f7fbb8037cb048d3e25b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 308, "license_type": "no_license", "max_line_length": 27, "num_lines": 22, "path": "/bplanner/choices.py", "repo_name": "daviesray-ornyx/bplanner", "src_encoding": "UTF-8", "text": "__author__ = 'user'\n\nCURRENCY_CHOICES = (\n (0, \"USD\"),\n (1, \"POUND\"),\n (2, \"EURO\"),\n)\n\nOFFERING_CHOICES = (\n (0, \"Products\"),\n (1, \"Services\")\n)\n\nTAXATION_SYSTEM_CHOICES = (\n (0, \"Tiered System\"),\n (1, \"Single System\")\n)\n\nBUSINESS_TYPES = (\n (0, \"Non-Profit\"),\n (1, \"For Profit\")\n)" }, { "alpha_fraction": 0.6068580746650696, "alphanum_fraction": 0.6315789222717285, "avg_line_length": 37, "blob_id": "0f5c4188dd3802acb2a2fb4ecf799f68cb9e71d5", "content_id": "a02c3d3588033a41e5066df8ccf7ce38859a4c49", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1254, "license_type": "no_license", "max_line_length": 205, "num_lines": 33, "path": "/bplanner/migrations-bak/0010_auto_20180724_0940.py", "repo_name": "daviesray-ornyx/bplanner", "src_encoding": "UTF-8", "text": "# Generated by Django 2.0.7 on 2018-07-24 08:40\n\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('bplanner', '0009_auto_20180723_1827'),\n ]\n\n operations = [\n migrations.RemoveField(\n model_name='helpsubsection',\n name='links_and_sources',\n ),\n migrations.AddField(\n model_name='helpsection',\n name='links_and_sources',\n field=models.TextField(blank=True, default='', null=True, verbose_name='Links and Sources'),\n ),\n 
migrations.AlterField(\n model_name='helpsubsection',\n name='help_section',\n field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='rel_help_sections', to='bplanner.HelpSection', verbose_name='Help Section'),\n ),\n migrations.AlterField(\n model_name='helpsubsectionexample',\n name='help_sub_section',\n field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='rel_help_sub_sections', to='bplanner.HelpSubSection', verbose_name='Help Sub-Section'),\n ),\n ]\n" }, { "alpha_fraction": 0.6082289814949036, "alphanum_fraction": 0.663685142993927, "avg_line_length": 28.421052932739258, "blob_id": "8d40d6d3d331b3a0eb4d049b5ef39f10d54ce704", "content_id": "ee92ae40bb488546ab0a315141a1a31b89adeabd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 559, "license_type": "no_license", "max_line_length": 170, "num_lines": 19, "path": "/bplanner/migrations/0009_businessplanmaincontent_title_page.py", "repo_name": "daviesray-ornyx/bplanner", "src_encoding": "UTF-8", "text": "# Generated by Django 2.0.7 on 2018-09-24 14:12\n\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('bplanner', '0008_auto_20180924_1630'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='businessplanmaincontent',\n name='title_page',\n field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='bplanner.BusinessPlanTitlePage', verbose_name='Business Plan Title Page'),\n ),\n ]\n" }, { "alpha_fraction": 0.6282528042793274, "alphanum_fraction": 0.6654275059700012, "avg_line_length": 28.88888931274414, "blob_id": "62afbe17e13892f8a0f49f73c4419b62a2c995ae", "content_id": "d332a64c777dccc1484ec7a8827cdc2d0af27e74", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 538, "license_type": "no_license", "max_line_length": 123, "num_lines": 18, "path": "/bplanner/migrations/0017_auto_20181005_0316.py", "repo_name": "daviesray-ornyx/bplanner", "src_encoding": "UTF-8", "text": "# Generated by Django 2.0.7 on 2018-10-05 00:16\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('bplanner', '0016_businessplansettings_projection_years_list_display'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='businessplanfinancialassumptions',\n name='first_financial_year_month',\n field=models.IntegerField(blank=True, default=1, null=True, verbose_name='Month Index of starting operations'),\n ),\n ]\n" }, { "alpha_fraction": 0.568825900554657, "alphanum_fraction": 0.6315789222717285, "avg_line_length": 26.44444465637207, "blob_id": "6ef8af89c9657d16e4cb38f7df5918fd78595055", "content_id": "b6dde5a5ba6ddbd5191365a1c351fa31f2a92607", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 494, "license_type": "no_license", "max_line_length": 122, "num_lines": 18, "path": "/bplanner/migrations/0016_businessplansettings_projection_years_list_display.py", "repo_name": "daviesray-ornyx/bplanner", "src_encoding": "UTF-8", "text": "# Generated by Django 2.0.7 on 2018-10-04 21:56\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('bplanner', '0015_auto_20181004_2351'),\n ]\n\n operations = [\n migrations.AddField(\n 
model_name='businessplansettings',\n name='projection_years_list_display',\n field=models.TextField(blank=True, default='[]', null=True, verbose_name='Projection years list for Display'),\n ),\n ]\n" }, { "alpha_fraction": 0.5394737124443054, "alphanum_fraction": 0.6074561476707458, "avg_line_length": 24.33333396911621, "blob_id": "4cff3558c3e776fa7ee20374414873aac34046a9", "content_id": "7aa69bd410b110c4c63d71ed0fa80ddf947c7702", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 456, "license_type": "no_license", "max_line_length": 115, "num_lines": 18, "path": "/bplanner/migrations-bak/0021_auto_20180902_1414.py", "repo_name": "daviesray-ornyx/bplanner", "src_encoding": "UTF-8", "text": "# Generated by Django 2.0.7 on 2018-09-02 11:14\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('bplanner', '0020_auto_20180902_1352'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='businessplan',\n name='logo',\n field=models.ImageField(default='logo_default.png', null=True, upload_to='imgs/', verbose_name='Logo'),\n ),\n ]\n" }, { "alpha_fraction": 0.7582417726516724, "alphanum_fraction": 0.7582417726516724, "avg_line_length": 17.200000762939453, "blob_id": "dd149b8ec17794ed6626dcc30489d6e5ffd28374", "content_id": "9f727161832c62795328c5c2fc6a7c9ebff8c541", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 91, "license_type": "no_license", "max_line_length": 33, "num_lines": 5, "path": "/bplanner/apps.py", "repo_name": "daviesray-ornyx/bplanner", "src_encoding": "UTF-8", "text": "from django.apps import AppConfig\n\n\nclass BplannerConfig(AppConfig):\n name = 'bplanner'\n" }, { "alpha_fraction": 0.41432639956474304, "alphanum_fraction": 0.4285714328289032, "avg_line_length": 47.17647171020508, "blob_id": "cc9041dd463d0a32cfe410069577aa0f6653a4a6", "content_id": "74b4321bdb25d2793e9c62a2e1019bce2d74783e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "HTML", "length_bytes": 2457, "license_type": "no_license", "max_line_length": 135, "num_lines": 51, "path": "/templates/help.html", "repo_name": "daviesray-ornyx/bplanner", "src_encoding": "UTF-8", "text": "{% extends \"base.html\" %}\n{% load static from staticfiles %}\n\n{% block page_content %}\n <!-- page content -->\n <div class=\"right_col\" role=\"main\" style=\"background-color: #ffffff;min-height: calc(100vh + 100px);\">\n <div class=\"section-fluid\" >\n <div class=\"clearfix\"></div>\n\n <div class=\"row\" style=\"box-shadow: 0 5px 15px 0 rgba(0,0,0,0.15);margin-bottom: 2px; margin-top: 20px;;padding: 10px 15px;\">\n <br/>\n <div class=\"sm-10 sm-off-1 lg-8 lg-off-2 xxl-6 xxl-off-3\">\n <div class=\"alert-styled-left\">\n <div class=\"alert-info\">\n info\n </div>\n <div class=\"alert-info-text text-small text-muted\">\n <div class=\"alert-heading \"> {{ help_section.title | safe }}</div>\n {{ help_section.description | safe }}\n </div>\n </div>\n <div class=\"textStack\">\n {% for rel_help_sub_section in help_section.rel_help_sections.all %}\n <div class=\"help-subsection\">\n <h6 class=\"help-subsection-title\">\n {{ rel_help_sub_section.title | safe }}\n </h6>\n <p class=\"text-muted help-subsection-instruction\">{{ rel_help_sub_section.instruction | safe }}</p>\n {# Iterating through examples#}\n <div class=\"examples-container\">\n <h6 class=\"examples-container-title\">Examples</h6>\n {% for 
rel_help_sub_section_example in rel_help_sub_section.rel_help_sub_section_examples.all %}\n <div class=\"example-container\">\n <div class=\"example-title text-muted\">\n <strong>{{ rel_help_sub_section_example.title }}</strong>\n </div>\n <div class=\"example-text text-muted\">\n {{ rel_help_sub_section_example.example | safe }}\n </div>\n </div>\n {% endfor %}\n </div>\n </div>\n {% endfor %}\n </div>\n </div>\n </div>\n\n </div>\n </div>\n{% endblock %}\n" }, { "alpha_fraction": 0.7305579781532288, "alphanum_fraction": 0.7328707575798035, "avg_line_length": 58.655174255371094, "blob_id": "fba67a167a7fe4861770bf5257ffd7fa04f0fb05", "content_id": "32f5f30cc29826528f95e0bb4413f5f1061e6bb2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3459, "license_type": "no_license", "max_line_length": 141, "num_lines": 58, "path": "/Project/urls.py", "repo_name": "daviesray-ornyx/bplanner", "src_encoding": "UTF-8", "text": "\"\"\"Project URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/2.0/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.contrib import admin\nfrom django.urls import path\nfrom django.conf import settings\nfrom django.conf.urls import url, include\nfrom django.conf.urls.static import static\nfrom django.contrib.staticfiles.urls import staticfiles_urlpatterns\nfrom bplanner.views import (\n LandingPageView, RegisterView, LoginView, LogoutView, PasswordResetView,\n PasswordChangeView, DashboardView, BusinessPlanDetailView, BusinessPlanHelpView, BusinessPlanDeleteView,\n save_title_page, save_main_content_page, save_financial_assumptions_page, save_financial_data_input_page,\n save_bplanner_settings, view_bplan, download_pdf)\n\n\nurlpatterns = [\n path('admin/', admin.site.urls),\n url(r'^summernote/', include('django_summernote.urls')),\n path('auth/', include('api.urls')),\n url(r'-/register$', RegisterView.as_view(), name='register-page'),\n url(r'-/login$', LoginView.as_view(), name='login-page'),\n url(r'-/password/reset$', PasswordResetView.as_view(), name='password-reset-page'),\n url(r'-/logout$', LogoutView.as_view(), name='logout-page'),\n url(r'-/password/change$', PasswordChangeView.as_view(), name='password-change-page'),\n url(r'-/help', BusinessPlanHelpView.as_view(), name='business-plan-help'),\n url(r'dashboard/new/business-plan/title_page$', save_title_page, name='save-title-page'),\n url(r'dashboard/new/business-plan/main_content_page$', save_main_content_page, name='save-main-content-page'),\n url(r'dashboard/new/business-plan/financial_assumptions_page$', save_financial_assumptions_page, name='save-financial-assumptions-page'),\n url(r'dashboard/new/business-plan/financial_data_input_page$', save_financial_data_input_page, name='save-financial-data-input-page'),\n url(r'dashboard/new/business-plan$', BusinessPlanDetailView.as_view(), name='business-plan-detail'),\n url(r'dashboard/view/business-plan$', BusinessPlanDetailView.as_view(), name='business-plan-view-detail'),\n 
url(r'dashboard/edit/business-plan$', BusinessPlanDetailView.as_view(), name='business-plan-edit-detail'),\n url(r'dashboard/save/business-plan/settings$', save_bplanner_settings, name='business-plan-save-settings'),\n url(r'dashboard/business-plan/view$', view_bplan, name='business-plan-get-view'),\n url(r'dashboard/business-plan/download$', download_pdf, name='business-plan-download-pdf'),\n url(r'dashboard/delete/business-plan$', BusinessPlanDeleteView.as_view(), name='business-plan-delete-detail'),\n url(r'dashboard$', DashboardView.as_view(), name='dashboard'),\n url(r'', LandingPageView.as_view(), name='landing-page'),\n\n]\n\nif settings.DEBUG:\n urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)\n urlpatterns += staticfiles_urlpatterns()" }, { "alpha_fraction": 0.5491143465042114, "alphanum_fraction": 0.5990338325500488, "avg_line_length": 26, "blob_id": "78a83233e5cdb531fd286eca583af4a725aab12b", "content_id": "4bab9a4bf7e71e0101da152c4773c3199fc37772", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 621, "license_type": "no_license", "max_line_length": 78, "num_lines": 23, "path": "/bplanner/migrations-bak/0016_auto_20180725_2108.py", "repo_name": "daviesray-ornyx/bplanner", "src_encoding": "UTF-8", "text": "# Generated by Django 2.0.7 on 2018-07-25 20:08\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('bplanner', '0015_auto_20180725_2106'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='businessplan',\n name='date_created',\n field=models.DateTimeField(blank=True, editable=False, null=True),\n ),\n migrations.AlterField(\n model_name='businessplan',\n name='date_modified',\n field=models.DateTimeField(blank=True, editable=False, null=True),\n ),\n ]\n" }, { "alpha_fraction": 0.616830587387085, "alphanum_fraction": 0.6243715882301331, "avg_line_length": 71.0472412109375, "blob_id": "cbd6e2566a44a43a96407665c8baaae07911115e", "content_id": "373b2796a834acf7f02bfc07c24f7d31c843f564", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9150, "license_type": "no_license", "max_line_length": 175, "num_lines": 127, "path": "/bplanner/migrations/0002_auto_20180907_1557.py", "repo_name": "daviesray-ornyx/bplanner", "src_encoding": "UTF-8", "text": "# Generated by Django 2.0.7 on 2018-09-07 12:57\n\nfrom django.conf import settings\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ('bplanner', '0001_initial'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='BusinessPlanFinancialAssumptions',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('currency', models.IntegerField(blank=True, default=0, max_length=500, null=True, verbose_name='Currency')),\n ('first_financial_year', models.IntegerField(blank=True, null=True, verbose_name='First Financial Year')),\n ('first_financial_year_month', models.CharField(blank=True, max_length=500, null=True, verbose_name='Month of starting operations')),\n ('projection_years', models.IntegerField(blank=True, null=True, verbose_name='Period of Projections in Years')),\n ('offerings_products_or_services', models.CharField(blank=True, 
default=0, max_length=500, null=True, verbose_name='Products/Services')),\n ('number_of_products_or_services', models.CharField(blank=True, default=1, max_length=500, null=True, verbose_name='Number of Products or Services Offered ')),\n ('product_services_table', models.TextField(blank=True, default='', null=True, verbose_name='Products or Services Offered Table')),\n ('count_of_months_in_financial_year', models.IntegerField(blank=True, default=12, null=True, verbose_name='Number of Months in Projection Year')),\n ('inflation_rate', models.FloatField(blank=True, default=0, null=True, verbose_name='Inflation Rate (Per Annum)')),\n ('salary_growth_rate', models.FloatField(blank=True, default=0, null=True, verbose_name='Salary Growth Rate (Per Annum)')),\n ('amortization_period', models.IntegerField(blank=True, null=True, verbose_name='Startup Cost Amortization Period (in Years) ')),\n ('trade_receivables', models.IntegerField(blank=True, null=True, verbose_name='Trade Receivables (period in months)')),\n ('trade_payables', models.IntegerField(blank=True, null=True, verbose_name='Trade Payables (period in months)')),\n ('other_expenses_payables', models.IntegerField(blank=True, null=True, verbose_name='Other Expenses Payable')),\n ('bad_debts', models.FloatField(blank=True, null=True, verbose_name='Bad Debts (% of revenue)')),\n ('taxation_system', models.IntegerField(blank=True, default=0, null=True, verbose_name='Taxation System')),\n ('corporate_tax_rate', models.FloatField(blank=True, default=20, null=True, verbose_name='Corporate Tax Rate ')),\n ('tax_slabs_table', models.TextField(blank=True, default='', max_length=500, null=True, verbose_name='Tax Slabs')),\n ('size', models.FloatField(blank=True, default=0, null=True, verbose_name='Size')),\n ('date_created', models.DateTimeField(blank=True, null=True, verbose_name='Date Created')),\n ('date_modified', models.DateTimeField(blank=True, null=True, verbose_name='Date Modified')),\n ('owner', models.ForeignKey(null=True, on_delete=django.db.models.deletion.DO_NOTHING, to=settings.AUTH_USER_MODEL, verbose_name='Business Plan Owner')),\n ],\n options={\n 'verbose_name': 'Business Plan FinancialAssumptions',\n 'verbose_name_plural': 'Business Plans FinancialAssumptions',\n 'ordering': ['-date_modified', '-date_created'],\n },\n ),\n migrations.CreateModel(\n name='BusinessPlanFinancialDataInput',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('financial_input', models.TextField(blank=True, default='', null=True, verbose_name='Financial Input')),\n ('size', models.FloatField(blank=True, default=0, null=True, verbose_name='Size')),\n ('date_created', models.DateTimeField(blank=True, null=True, verbose_name='Date Created')),\n ('date_modified', models.DateTimeField(blank=True, null=True, verbose_name='Date Modified')),\n ('owner', models.ForeignKey(null=True, on_delete=django.db.models.deletion.DO_NOTHING, to=settings.AUTH_USER_MODEL, verbose_name='Business Plan Owner')),\n ],\n options={\n 'verbose_name': 'Business Plan Financial Data Input',\n 'verbose_name_plural': 'Business Plans Financial Data Input',\n 'ordering': ['-date_modified', '-date_created'],\n },\n ),\n migrations.CreateModel(\n name='BusinessPlanMainContent',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('main_content', models.TextField(blank=True, default='', null=True, verbose_name='Main Content')),\n ('size', models.FloatField(blank=True, 
default=0, null=True, verbose_name='Size')),\n ('date_created', models.DateTimeField(blank=True, null=True, verbose_name='Date Created')),\n ('date_modified', models.DateTimeField(blank=True, null=True, verbose_name='Date Modified')),\n ('owner', models.ForeignKey(null=True, on_delete=django.db.models.deletion.DO_NOTHING, to=settings.AUTH_USER_MODEL, verbose_name='Business Plan Owner')),\n ],\n options={\n 'verbose_name': 'Business Plan Main Content',\n 'verbose_name_plural': 'Business Plans Main Content',\n 'ordering': ['-date_modified', '-date_created'],\n },\n ),\n migrations.CreateModel(\n name='BusinessPlanTitlePage',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('company_name', models.CharField(default='', max_length=500, verbose_name='Company, Business, or Project name')),\n ('tagline', models.CharField(blank=True, default='', max_length=500, null=True, verbose_name='Tagline')),\n ('address', models.CharField(blank=True, default='', max_length=500, null=True, verbose_name='Business address')),\n ('phone_number', models.CharField(blank=True, default='', max_length=15, null=True, verbose_name='Phone Number')),\n ('email', models.CharField(blank=True, default='', max_length=500, null=True, verbose_name='Email')),\n ('website', models.CharField(blank=True, default='', max_length=500, null=True, verbose_name='Website')),\n ('presented_to', models.CharField(blank=True, default='', max_length=500, null=True, verbose_name='Presented to')),\n ('logo', models.ImageField(default='logo_default.png', null=True, upload_to='imgs/', verbose_name='Logo')),\n ('size', models.FloatField(blank=True, default=0, null=True, verbose_name='Size')),\n ('bplan_size', models.FloatField(blank=True, default=0, null=True, verbose_name='Size')),\n ('date_created', models.DateTimeField(blank=True, null=True, verbose_name='Date Created')),\n ('date_modified', models.DateTimeField(blank=True, null=True, verbose_name='Date Modified')),\n ('owner', models.ForeignKey(null=True, on_delete=django.db.models.deletion.DO_NOTHING, to=settings.AUTH_USER_MODEL, verbose_name='Business Plan Owner')),\n ],\n options={\n 'verbose_name': 'Business Plan Title Page',\n 'verbose_name_plural': 'Business Plans Title Page',\n 'ordering': ['-date_modified', '-date_created'],\n },\n ),\n migrations.RemoveField(\n model_name='businessplan',\n name='owner',\n ),\n migrations.DeleteModel(\n name='BusinessPlan',\n ),\n migrations.AddField(\n model_name='businessplanmaincontent',\n name='title_page',\n field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='bplanner.BusinessPlanTitlePage', verbose_name='Business Plan Title Page'),\n ),\n migrations.AddField(\n model_name='businessplanfinancialdatainput',\n name='title_page',\n field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='bplanner.BusinessPlanTitlePage', verbose_name='Business Plan Title Page'),\n ),\n migrations.AddField(\n model_name='businessplanfinancialassumptions',\n name='title_page',\n field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='bplanner.BusinessPlanTitlePage', verbose_name='Business Plan Title Page'),\n ),\n ]\n" }, { "alpha_fraction": 0.5878311991691589, "alphanum_fraction": 0.6074582934379578, "avg_line_length": 35.39285659790039, "blob_id": "a2528af26080677217a1f796112b861fe8bb9028", "content_id": "b9c957e9032f3cf35e9aaffb2f6c411d41194ae5", "detected_licenses": [], "is_generated": false, "is_vendor": false, 
"language": "Python", "length_bytes": 1019, "license_type": "no_license", "max_line_length": 182, "num_lines": 28, "path": "/bplanner/migrations-bak/0025_profile.py", "repo_name": "daviesray-ornyx/bplanner", "src_encoding": "UTF-8", "text": "# Generated by Django 2.0.7 on 2018-09-03 10:28\n\nfrom django.conf import settings\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ('bplanner', '0024_businessplan_size'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Profile',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('usage', models.FloatField(blank=True, default=0, null=True, verbose_name='Space Used')),\n ('user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='user_profile', to=settings.AUTH_USER_MODEL, verbose_name='User')),\n ],\n options={\n 'verbose_name': 'User Profile',\n 'verbose_name_plural': 'User Profiles',\n },\n ),\n ]\n" }, { "alpha_fraction": 0.6184210777282715, "alphanum_fraction": 0.6694079041481018, "avg_line_length": 31, "blob_id": "51d4889ae1a232178e675be297961bbf5cd1573e", "content_id": "c0669af3ad1b8a94771fca6e74bbf25f980d905b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 608, "license_type": "no_license", "max_line_length": 213, "num_lines": 19, "path": "/bplanner/migrations-bak/0011_auto_20180724_0947.py", "repo_name": "daviesray-ornyx/bplanner", "src_encoding": "UTF-8", "text": "# Generated by Django 2.0.7 on 2018-07-24 08:47\n\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('bplanner', '0010_auto_20180724_0940'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='helpsubsectionexample',\n name='help_sub_section',\n field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='rel_help_sub_section_examples', to='bplanner.HelpSubSection', verbose_name='Help Sub-Section'),\n ),\n ]\n" }, { "alpha_fraction": 0.5678392052650452, "alphanum_fraction": 0.5899497270584106, "avg_line_length": 32.16666793823242, "blob_id": "4c6890b7c5fb99d6f0f1b792145ec0a497588ca4", "content_id": "07980a7cb6fb468579928d7fdf03d6102da9b58d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 995, "license_type": "no_license", "max_line_length": 117, "num_lines": 30, "path": "/bplanner/migrations-bak/0002_auto_20180723_1730.py", "repo_name": "daviesray-ornyx/bplanner", "src_encoding": "UTF-8", "text": "# Generated by Django 2.0.7 on 2018-07-23 16:30\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('bplanner', '0001_initial'),\n ]\n\n operations = [\n migrations.AlterModelOptions(\n name='help',\n options={'verbose_name': 'Help Content', 'verbose_name_plural': 'Help Contents'},\n ),\n migrations.AlterModelOptions(\n name='helpsubsection',\n options={'verbose_name': 'Help Sub-Section', 'verbose_name_plural': 'Help Sub-Sections'},\n ),\n migrations.AlterModelOptions(\n name='helpsubsectionexample',\n options={'verbose_name': 'Help Sub-Section Example', 'verbose_name_plural': 'Help Sub-Section Examples'},\n ),\n migrations.AddField(\n model_name='help',\n name='ref_id',\n 
field=models.CharField(blank=True, default='', max_length=250, null=True, verbose_name='Ref Ids'),\n ),\n ]\n" }, { "alpha_fraction": 0.6043165326118469, "alphanum_fraction": 0.631654679775238, "avg_line_length": 29.217391967773438, "blob_id": "bd49e5c3ba8bfd58a668ded994d0a3924edd05ed", "content_id": "bb766915a883baa2a19dc6cc52c70dfadf559281", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 695, "license_type": "no_license", "max_line_length": 90, "num_lines": 23, "path": "/bplanner/migrations/0015_auto_20181004_2351.py", "repo_name": "daviesray-ornyx/bplanner", "src_encoding": "UTF-8", "text": "# Generated by Django 2.0.7 on 2018-10-04 20:51\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('bplanner', '0014_businessplansettings_month_list_initiated'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='businessplansettings',\n name='year_list_initiated',\n field=models.BooleanField(default=False, verbose_name='Year List Initiated'),\n ),\n migrations.AlterField(\n model_name='businessplansettings',\n name='month_list_initiated',\n field=models.BooleanField(default=False, verbose_name='Month List Initiated'),\n ),\n ]\n" }, { "alpha_fraction": 0.5570175647735596, "alphanum_fraction": 0.625, "avg_line_length": 24.33333396911621, "blob_id": "2042d24b47decc797b93ca478f2f0f6e3182f25e", "content_id": "230726aa50f3ccfd6be122307713d8ad53dfcb05", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 456, "license_type": "no_license", "max_line_length": 84, "num_lines": 18, "path": "/bplanner/migrations/0007_auto_20180911_1241.py", "repo_name": "daviesray-ornyx/bplanner", "src_encoding": "UTF-8", "text": "# Generated by Django 2.0.7 on 2018-09-11 09:41\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('bplanner', '0006_auto_20180911_1231'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='businessplanfinancialassumptions',\n name='tax_slabs_table',\n field=models.TextField(blank=True, null=True, verbose_name='Tax Slabs'),\n ),\n ]\n" }, { "alpha_fraction": 0.5887592434883118, "alphanum_fraction": 0.5982800722122192, "avg_line_length": 39.197532653808594, "blob_id": "8a5e1dad145ee4de07189743323f12d9924835c2", "content_id": "94a837e297ece4d9840c835640e1f385fa6046c5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3256, "license_type": "no_license", "max_line_length": 116, "num_lines": 81, "path": "/bplanner/migrations/0008_auto_20180924_1630.py", "repo_name": "daviesray-ornyx/bplanner", "src_encoding": "UTF-8", "text": "# Generated by Django 2.0.7 on 2018-09-24 13:30\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('bplanner', '0007_auto_20180911_1241'),\n ]\n\n operations = [\n migrations.RemoveField(\n model_name='businessplanmaincontent',\n name='main_content',\n ),\n migrations.RemoveField(\n model_name='businessplanmaincontent',\n name='title_page',\n ),\n migrations.AddField(\n model_name='businessplanmaincontent',\n name='company_description',\n field=models.TextField(blank=True, default='', null=True, verbose_name='Company Description'),\n ),\n migrations.AddField(\n model_name='businessplanmaincontent',\n name='executive_summary',\n field=models.TextField(blank=True, default='', null=True, verbose_name='Executive 
Summary'),\n ),\n migrations.AddField(\n model_name='businessplanmaincontent',\n name='industry_analysis',\n field=models.TextField(blank=True, default='', null=True, verbose_name='Industry Analysis'),\n ),\n migrations.AddField(\n model_name='businessplanmaincontent',\n name='insights',\n field=models.TextField(blank=True, default='', null=True, verbose_name='Insights'),\n ),\n migrations.AddField(\n model_name='businessplanmaincontent',\n name='key_success_factors',\n field=models.TextField(blank=True, default='', null=True, verbose_name='Key Success Factors'),\n ),\n migrations.AddField(\n model_name='businessplanmaincontent',\n name='marketing_plan',\n field=models.TextField(blank=True, default='', null=True, verbose_name='Marketing Plan'),\n ),\n migrations.AddField(\n model_name='businessplanmaincontent',\n name='milestones',\n field=models.TextField(blank=True, default='', null=True, verbose_name='Milestones'),\n ),\n migrations.AddField(\n model_name='businessplanmaincontent',\n name='mission_vision',\n field=models.TextField(blank=True, default='', null=True, verbose_name='Mission and Vision'),\n ),\n migrations.AddField(\n model_name='businessplanmaincontent',\n name='objectives',\n field=models.TextField(blank=True, default='', null=True, verbose_name='Objectives'),\n ),\n migrations.AddField(\n model_name='businessplanmaincontent',\n name='ownership_and_management_plan',\n field=models.TextField(blank=True, default='', null=True, verbose_name='Ownership and Management Plan'),\n ),\n migrations.AddField(\n model_name='businessplanmaincontent',\n name='swot_analysis',\n field=models.TextField(blank=True, default='', null=True, verbose_name='SWOT Analysis'),\n ),\n migrations.AddField(\n model_name='businessplanmaincontent',\n name='tam_sam_som_analysis',\n field=models.TextField(blank=True, default='', null=True, verbose_name='TAM-SAM-SOM Analysis'),\n ),\n ]\n" }, { "alpha_fraction": 0.8336291313171387, "alphanum_fraction": 0.8340727686882019, "avg_line_length": 30.760562896728516, "blob_id": "27c2d405d823842fb670db714de0471d00e434b6", "content_id": "5836dc86ab4b7e57e555e5f542f259fae535babb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2254, "license_type": "no_license", "max_line_length": 117, "num_lines": 71, "path": "/bplanner/admin.py", "repo_name": "daviesray-ornyx/bplanner", "src_encoding": "UTF-8", "text": "from django.contrib import admin\nfrom django_summernote.admin import SummernoteModelAdmin\n\n# Register your models here.\n\nfrom .models import (\n Profile, HelpSection, HelpSubSection, HelpSubSectionExample,\n BusinessPlanTitlePage, BusinessPlanMainContent, BusinessPlanFinancialAssumptions, BusinessPlanFinancialDataInput,\n BusinessPlanSettings, BusinessPlanSample,\n Currency, Month\n)\n\nclass ProfileAdmin(admin.ModelAdmin):\n pass\n\nclass HelpSectionAdmin(admin.ModelAdmin):\n list_display = ('ref_id', 'title')\n\nclass HelpSubSectionExampleInline(admin.StackedInline):\n model = HelpSubSectionExample\n extra = 1\n\nclass HelpSubSectionAdmin(SummernoteModelAdmin): # instead of ModelAdmin\n inlines = [HelpSubSectionExampleInline,]\n\n# class HelpSubSectionAdmin(admin.ModelAdmin):\n# inlines = [HelpSubSectionExampleInline,]\n\nclass HelpSubSectionExampleAdmin(SummernoteModelAdmin):\n pass\n\nclass BusinessPlanTitlePageAdmin(SummernoteModelAdmin):\n pass\n\nclass BusinessPlanMainContentAdmin(SummernoteModelAdmin):\n pass\n\nclass BusinessPlanFinancialAssumptionsAdmin(SummernoteModelAdmin):\n pass\n\nclass 
BusinessPlanFinancialDataInputAdmin(SummernoteModelAdmin):\n pass\n\nclass BusinessPlanSettingsAdmin(SummernoteModelAdmin):\n pass\n\nclass BusinessPlanSampleAdmin(SummernoteModelAdmin):\n pass\n\n\n# class HelpSubSectionExampleAdmin(admin.ModelAdmin):\n# pass\n\nclass CurrencyAdmin(admin.ModelAdmin):\n pass\n\nclass MonthAdmin(admin.ModelAdmin):\n pass\n\nadmin.site.register(Profile, ProfileAdmin)\nadmin.site.register(HelpSection, HelpSectionAdmin)\nadmin.site.register(HelpSubSection, HelpSubSectionAdmin)\nadmin.site.register(HelpSubSectionExample, HelpSubSectionExampleAdmin)\nadmin.site.register(BusinessPlanTitlePage, BusinessPlanTitlePageAdmin)\nadmin.site.register(BusinessPlanMainContent, BusinessPlanMainContentAdmin)\nadmin.site.register(BusinessPlanFinancialAssumptions, BusinessPlanFinancialAssumptionsAdmin)\nadmin.site.register(BusinessPlanFinancialDataInput, BusinessPlanFinancialDataInputAdmin)\nadmin.site.register(BusinessPlanSettings, BusinessPlanSettingsAdmin)\nadmin.site.register(BusinessPlanSample, BusinessPlanSampleAdmin)\nadmin.site.register(Currency, CurrencyAdmin)\nadmin.site.register(Month, MonthAdmin)" }, { "alpha_fraction": 0.5965092182159424, "alphanum_fraction": 0.6303901672363281, "avg_line_length": 33.78571319580078, "blob_id": "0f6dba831c87761dc9def4f44440c0e175403362", "content_id": "e4bd075e73c4b9771f23600b6bcbf9e3f8c2c5b2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 974, "license_type": "no_license", "max_line_length": 109, "num_lines": 28, "path": "/bplanner/migrations/0006_auto_20180911_1231.py", "repo_name": "daviesray-ornyx/bplanner", "src_encoding": "UTF-8", "text": "# Generated by Django 2.0.7 on 2018-09-11 09:31\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('bplanner', '0005_auto_20180911_0234'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='businessplanfinancialassumptions',\n name='corporate_tax_rate',\n field=models.IntegerField(blank=True, default=20, null=True, verbose_name='Corporate Tax Rate '),\n ),\n migrations.AlterField(\n model_name='businessplanfinancialassumptions',\n name='inflation_rate',\n field=models.IntegerField(blank=True, null=True, verbose_name='Inflation Rate (Per Annum)'),\n ),\n migrations.AlterField(\n model_name='businessplanfinancialassumptions',\n name='salary_growth_rate',\n field=models.IntegerField(blank=True, null=True, verbose_name='Salary Growth Rate (Per Annum)'),\n ),\n ]\n" }, { "alpha_fraction": 0.5523690581321716, "alphanum_fraction": 0.5947631001472473, "avg_line_length": 28.703702926635742, "blob_id": "818b3eb9e19b5ba44cb7b7807d89f6d088bb5dce", "content_id": "366af7c9838316b559659c32b07e0de2d705eda8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 802, "license_type": "no_license", "max_line_length": 116, "num_lines": 27, "path": "/bplanner/migrations-bak/0007_auto_20180723_1823.py", "repo_name": "daviesray-ornyx/bplanner", "src_encoding": "UTF-8", "text": "# Generated by Django 2.0.7 on 2018-07-23 17:23\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('bplanner', '0006_auto_20180723_1813'),\n ]\n\n operations = [\n migrations.RemoveField(\n model_name='helpsubsectionexample',\n name='instruction',\n ),\n migrations.AddField(\n model_name='helpsubsection',\n name='instruction',\n field=models.TextField(blank=True, default='', null=True, 
verbose_name='Instruction'),\n ),\n migrations.AlterField(\n model_name='helpsubsectionexample',\n name='title',\n field=models.CharField(blank=True, default='', max_length=250, null=True, verbose_name='Example Title'),\n ),\n ]\n" }, { "alpha_fraction": 0.5548654198646545, "alphanum_fraction": 0.6190476417541504, "avg_line_length": 25.83333396911621, "blob_id": "08964d00eb6a26a25d964cbee60b19a9f0e14de0", "content_id": "2bd865a61b1bfbd6d69506e203107084d3d5ccdc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 483, "license_type": "no_license", "max_line_length": 133, "num_lines": 18, "path": "/bplanner/migrations/0011_auto_20181002_1818.py", "repo_name": "daviesray-ornyx/bplanner", "src_encoding": "UTF-8", "text": "# Generated by Django 2.0.7 on 2018-10-02 15:18\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('bplanner', '0010_auto_20181001_1534'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='businessplantitlepage',\n name='logo',\n field=models.ImageField(blank=True, default='/imgs/logo_default.png', null=True, upload_to='imgs/', verbose_name='Logo'),\n ),\n ]\n" }, { "alpha_fraction": 0.5717975497245789, "alphanum_fraction": 0.5924586653709412, "avg_line_length": 41.08695602416992, "blob_id": "a2a796807fda11f56c2b8b7131c6f2e14061dc38", "content_id": "015a41588dadbdb0a741391c1d8541c830abe2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1936, "license_type": "no_license", "max_line_length": 167, "num_lines": 46, "path": "/bplanner/migrations-bak/0005_auto_20180723_1805.py", "repo_name": "daviesray-ornyx/bplanner", "src_encoding": "UTF-8", "text": "# Generated by Django 2.0.7 on 2018-07-23 17:05\n\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('bplanner', '0004_auto_20180723_1757'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='HelpSection',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('ref_id', models.CharField(blank=True, default='', max_length=250, null=True, verbose_name='Ref Id')),\n ('title', models.CharField(blank=True, default='', max_length=250, null=True, verbose_name='Help Section Title')),\n ('description', models.TextField(blank=True, default='', null=True, verbose_name='Description')),\n ],\n options={\n 'verbose_name': 'Help Section Content',\n 'verbose_name_plural': 'Help Section Contents',\n 'ordering': ['ref_id'],\n },\n ),\n migrations.AlterField(\n model_name='helpsubsection',\n name='help',\n field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='bplanner.HelpSection', verbose_name='Help Section'),\n ),\n migrations.AlterField(\n model_name='helpsubsectionexample',\n name='help_sub_section',\n field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='bplanner.HelpSubSection', verbose_name='Help Sub-Section'),\n ),\n migrations.AlterField(\n model_name='helpsubsectionexample',\n name='title',\n field=models.CharField(blank=True, default='', max_length=250, null=True, verbose_name='Help Sub-Section Example Title'),\n ),\n migrations.DeleteModel(\n name='Help',\n ),\n ]\n" }, { "alpha_fraction": 0.5435897707939148, "alphanum_fraction": 0.5923076868057251, "avg_line_length": 21.66666603088379, "blob_id": 
"83e9c7e42e260ef00f2e7675cb60bf6c926c91a1", "content_id": "5a1c9955279c2eb181f103fb94aafbac5c663cdc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 390, "license_type": "no_license", "max_line_length": 62, "num_lines": 18, "path": "/bplanner/migrations-bak/0009_auto_20180723_1827.py", "repo_name": "daviesray-ornyx/bplanner", "src_encoding": "UTF-8", "text": "# Generated by Django 2.0.7 on 2018-07-23 17:27\n\nfrom django.db import migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('bplanner', '0008_helpsubsection_links_and_sources'),\n ]\n\n operations = [\n migrations.RenameField(\n model_name='helpsubsection',\n old_name='help',\n new_name='help_section',\n ),\n ]\n" }, { "alpha_fraction": 0.6297222375869751, "alphanum_fraction": 0.6365670561790466, "avg_line_length": 70, "blob_id": "e53cfe45b6ed679659f25e09f38cd5adf3aee63d", "content_id": "bf259548ea87049398b02b12a22f98ae8623095f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7597, "license_type": "no_license", "max_line_length": 182, "num_lines": 107, "path": "/bplanner/migrations/0003_auto_20180910_1213.py", "repo_name": "daviesray-ornyx/bplanner", "src_encoding": "UTF-8", "text": "# Generated by Django 2.0.7 on 2018-09-10 09:13\n\nfrom django.conf import settings\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('bplanner', '0002_auto_20180907_1557'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='BusinessPlanSettings',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('step_monitor', models.TextField(blank=True, default='', null=True, verbose_name='Step Monitor')),\n ('calendarMonths', models.TextField(blank=True, default='', null=True, verbose_name='Step Monitor')),\n ('projectionMonthsList', models.TextField(blank=True, default='', null=True, verbose_name='Step Monitor')),\n ('projectionYears', models.IntegerField(blank=True, default=0, null=True, verbose_name='Number of projection years')),\n ('first_financial_year', models.IntegerField(blank=True, default=2018, null=True, verbose_name='First Financial Year')),\n ('last_financial_year', models.IntegerField(blank=True, null=True, verbose_name='Last Financial Year')),\n ('count_of_months_in_financial_year', models.IntegerField(blank=True, default=12, null=True, verbose_name='Count of months in a financial year')),\n ('projection_years_list', models.TextField(blank=True, default='[]', null=True, verbose_name='Projection years list')),\n ('product_count', models.IntegerField(blank=True, null=True, verbose_name='Products/Services count')),\n ('products', models.TextField(blank=True, default='', null=True, verbose_name='Step Monitor')),\n ('theme', models.TextField(blank=True, default='', null=True, verbose_name='Theme')),\n ('cost_appropriation_methods', models.TextField(blank=True, default='', null=True, verbose_name='Cost Appropriation Methods')),\n ('operating_cost_list', models.TextField(blank=True, default='', null=True, verbose_name='Operating Costs List')),\n ('employees_list', models.TextField(blank=True, default='', null=True, verbose_name='Employees List')),\n ('capital_sources_list', models.TextField(blank=True, default='', null=True, verbose_name='Capital Sources List')),\n ('tangible_assets_list', models.TextField(blank=True, default='', null=True, 
verbose_name='Tangible Assets List')),\n ('intangible_assets_list', models.TextField(blank=True, default='', null=True, verbose_name='Intangible Assets List')),\n ('deposit_item_list', models.TextField(blank=True, default='', null=True, verbose_name='Deposit Items List')),\n ('startup_cost_item_list', models.TextField(blank=True, default='', null=True, verbose_name='Startup Cost Items List')),\n ('total_assets', models.TextField(blank=True, default='', null=True, verbose_name='Total Assets')),\n ('total_liabilities', models.TextField(blank=True, default='', null=True, verbose_name='Total Liabilities')),\n ('tangible_assets_balance_total', models.TextField(blank=True, default='', null=True, verbose_name='Tangible Assets Balance Total')),\n ('intangible_assets_balance_total', models.TextField(blank=True, default='', null=True, verbose_name='Intangible Assets Balance Total')),\n ('cashFlow_changes_during_the_year_per_month', models.TextField(blank=True, default='', null=True, verbose_name='Cash Flow Changes During The Year Per Month')),\n ('closing_cash_balance_per_month', models.TextField(blank=True, default='', null=True, verbose_name='Closing Cash Balance Per Month')),\n ('revenue_totals_per_year', models.TextField(blank=True, default='', null=True, verbose_name='Revenue Totals Per Year')),\n ('direct_cost_totals_per_year', models.TextField(blank=True, default='', null=True, verbose_name='Direct Cost Totals Per Year')),\n ('gross_profit', models.TextField(blank=True, default='', null=True, verbose_name='Gross Profit')),\n ('operating_cost_totals_per_year', models.TextField(blank=True, default='', null=True, verbose_name='Operating Cost Totals Per Year')),\n ('eat', models.TextField(blank=True, default='', null=True, verbose_name='EAT')),\n ('net_margin_per_month', models.TextField(blank=True, default='', null=True, verbose_name='Net Margin Per Month')),\n ],\n options={\n 'verbose_name': 'Business Plan Settings',\n 'verbose_name_plural': 'Business Plans Settings',\n 'ordering': ['-id'],\n },\n ),\n migrations.AlterField(\n model_name='businessplanfinancialassumptions',\n name='currency',\n field=models.IntegerField(choices=[(0, 'USD'), (1, 'POUND'), (2, 'EURO')], default=0, verbose_name='Currency'),\n ),\n migrations.AlterField(\n model_name='businessplanfinancialassumptions',\n name='number_of_products_or_services',\n field=models.IntegerField(blank=True, default=1, null=True, verbose_name='Number of Products or Services Offered '),\n ),\n migrations.AlterField(\n model_name='businessplanfinancialassumptions',\n name='offerings_products_or_services',\n field=models.IntegerField(choices=[(0, 'Products'), (1, 'Services')], default=0, verbose_name='Products/Services'),\n ),\n migrations.AlterField(\n model_name='businessplanfinancialassumptions',\n name='taxation_system',\n field=models.IntegerField(choices=[(0, 'Slab System'), (1, 'Single System')], default=0, verbose_name='Taxation System'),\n ),\n migrations.AlterField(\n model_name='businessplanmaincontent',\n name='owner',\n field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.DO_NOTHING, to=settings.AUTH_USER_MODEL, verbose_name='Business Plan Owner'),\n ),\n migrations.AlterField(\n model_name='businessplanmaincontent',\n name='title_page',\n field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='bplanner.BusinessPlanTitlePage', verbose_name='Business Plan Title Page'),\n ),\n migrations.AlterField(\n model_name='businessplantitlepage',\n name='company_name',\n 
field=models.CharField(blank=True, default='', max_length=500, null=True, verbose_name='Company, Business, or Project name'),\n ),\n migrations.AlterField(\n model_name='businessplantitlepage',\n name='logo',\n field=models.ImageField(blank=True, default='logo_default.png', null=True, upload_to='imgs/', verbose_name='Logo'),\n ),\n migrations.AlterField(\n model_name='businessplantitlepage',\n name='owner',\n field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.DO_NOTHING, to=settings.AUTH_USER_MODEL, verbose_name='Business Plan Owner'),\n ),\n migrations.AddField(\n model_name='businessplansettings',\n name='title_page',\n field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='bplanner.BusinessPlanTitlePage', verbose_name='Business Plan Title Page'),\n ),\n ]\n" }, { "alpha_fraction": 0.5458515286445618, "alphanum_fraction": 0.6135371327400208, "avg_line_length": 24.44444465637207, "blob_id": "21fbb03645bbf0d979650efffb688690ba30860f", "content_id": "a5b6d0ce5e3d612b475cc259d4b7c900d3d91bb2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 458, "license_type": "no_license", "max_line_length": 104, "num_lines": 18, "path": "/bplanner/migrations-bak/0008_helpsubsection_links_and_sources.py", "repo_name": "daviesray-ornyx/bplanner", "src_encoding": "UTF-8", "text": "# Generated by Django 2.0.7 on 2018-07-23 17:26\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('bplanner', '0007_auto_20180723_1823'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='helpsubsection',\n name='links_and_sources',\n field=models.TextField(blank=True, default='', null=True, verbose_name='Links and Sources'),\n ),\n ]\n" }, { "alpha_fraction": 0.563265323638916, "alphanum_fraction": 0.6306122541427612, "avg_line_length": 26.22222137451172, "blob_id": "f5e67564544aefacdbf89369241c8532d2840f8d", "content_id": "3c861f21db12a51fbdf44da7499644a78ff08a43", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 490, "license_type": "no_license", "max_line_length": 127, "num_lines": 18, "path": "/bplanner/migrations/0014_businessplansettings_month_list_initiated.py", "repo_name": "daviesray-ornyx/bplanner", "src_encoding": "UTF-8", "text": "# Generated by Django 2.0.7 on 2018-10-04 20:20\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('bplanner', '0013_auto_20181004_1627'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='businessplansettings',\n name='month_list_initiated',\n field=models.CharField(blank=True, default='false', max_length=10, null=True, verbose_name='Month List Initiated'),\n ),\n ]\n" }, { "alpha_fraction": 0.5598631501197815, "alphanum_fraction": 0.5986316800117493, "avg_line_length": 30.321428298950195, "blob_id": "48b8584aca8376f4f4adca7e80a74147d9e7410c", "content_id": "1cadc873ad53ddbc092c55707a5ef27884e15c3b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 877, "license_type": "no_license", "max_line_length": 114, "num_lines": 28, "path": "/bplanner/migrations-bak/0022_auto_20180902_1955.py", "repo_name": "daviesray-ornyx/bplanner", "src_encoding": "UTF-8", "text": "# Generated by Django 2.0.7 on 2018-09-02 16:55\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n 
('bplanner', '0021_auto_20180902_1414'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='businessplan',\n name='company_name',\n field=models.CharField(default='', max_length=500, verbose_name='Company, Business, or Project name'),\n ),\n migrations.AlterField(\n model_name='businessplan',\n name='date_created',\n field=models.DateTimeField(blank=True, null=True, verbose_name='Date Created'),\n ),\n migrations.AlterField(\n model_name='businessplan',\n name='date_modified',\n field=models.DateTimeField(blank=True, null=True, verbose_name='Date Modified'),\n ),\n ]\n" }, { "alpha_fraction": 0.6963470578193665, "alphanum_fraction": 0.7054794430732727, "avg_line_length": 38.84848403930664, "blob_id": "e6e8a60ea58c5a87a59c64b9cfe9d3edcd7d81fc", "content_id": "c526269c79927667993eba8d646fdeb861d406c4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1314, "license_type": "no_license", "max_line_length": 109, "num_lines": 33, "path": "/api/views.py", "repo_name": "daviesray-ornyx/bplanner", "src_encoding": "UTF-8", "text": "from django.http import Http404\nfrom rest_framework.views import APIView\nfrom rest_framework.response import Response\nfrom rest_framework import status\nfrom django.views.decorators.csrf import csrf_exempt\nfrom rest_framework.decorators import api_view\nfrom django.contrib.auth import authenticate, login\n\n# Create your views here.\n\n@csrf_exempt\n@api_view(['GET', 'POST'])\ndef sign_up(request):\n    \"\"\"\n    Attempts login and returns status\n    \"\"\"\n    # retrieve email and password\n    email = request.POST.get('email', None)\n    password = request.POST.get('password', None)\n    if email is None or password is None: # confirm all details passed\n        return Response({'status': 'ERR', 'message': 'Missing details'}, status=status.HTTP_400_BAD_REQUEST)\n\n    # check if user matches these details\n    user = authenticate(request, username=email, password=password)\n    if user is not None:\n        login(request, user)\n        # fall through to the success response below.\n    else:\n        # Return an 'invalid login' error message.\n        return Response({'status': 'ERR', 'message': 'Invalid credentials.'}, status=status.HTTP_401_UNAUTHORIZED)\n    # finally return success message\n    return Response({'status': 'SUCCESS', 'message': 'Sign up successful.'}, status=status.HTTP_200_OK)" }, { "alpha_fraction": 0.6217127442359924, "alphanum_fraction": 0.6258395314216614, "avg_line_length": 46.34865951538086, "blob_id": "1fb9eca347b94e53d4a50fcf15e61a5039cfbf30", "content_id": "b41618188995ce280cab259a8b4a54ead9e7d359", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 37075, "license_type": "no_license", "max_line_length": 217, "num_lines": 783, "path": "/bplanner/views.py", "repo_name": "daviesray-ornyx/bplanner", "src_encoding": "UTF-8", "text": "import sys, os\nfrom django.conf import settings\nfrom io import BytesIO\nimport xhtml2pdf.pisa as pisa\nfrom django.template.loader import get_template\nfrom django.contrib.auth.models import User\nfrom django.contrib.auth import authenticate, login, logout\nfrom django.shortcuts import render, redirect\nfrom django.http import JsonResponse, HttpResponse\nfrom django.views import View\nfrom bplanner.models import *\nfrom bplanner.forms import (BusinessPlanTitlePageForm, BusinessPlanMainContentForm,\n BusinessPlanFinancialAssumptionsForm, BusinessPlanFinancialDataInputForm, BusinessPlanSettingsForm,\n )\n\n\n# common 
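helpers shared by the views below.\n# get_size recursively estimates an object's in-memory footprint; the views use it to meter\n# each user's storage quota (Profile.usage). compose_default_settings builds a\n# BusinessPlanSettings instance pre-filled with the app's default JSON blobs (saved by the caller).\n# common 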
functions\ndef get_size(obj, seen=None):\n \"\"\"Recursively finds size of objects\"\"\"\n size = sys.getsizeof(obj)\n if seen is None:\n seen = set()\n obj_id = id(obj)\n if obj_id in seen:\n return 0\n # Important mark as seen *before* entering recursion to gracefully handle\n # self-referential objects\n seen.add(obj_id)\n if isinstance(obj, dict):\n size += sum([get_size(v, seen) for v in obj.values()])\n size += sum([get_size(k, seen) for k in obj.keys()])\n elif hasattr(obj, '__dict__'):\n size += get_size(obj.__dict__, seen)\n elif hasattr(obj, '__iter__') and not isinstance(obj, (str, bytes, bytearray)):\n size += sum([get_size(i, seen) for i in obj])\n return size\n\ndef compose_default_settings():\n bplan_settings = BusinessPlanSettings()\n bplan_settings.step_monitor = \"\"\"{\n '#step-1': {\n 'passed' : false,\n 'auto_generate': false,\n 'validate_steps': [],\n 'friendly_name': 'Title page'\n },\n '#step-2': {\n 'passed' : false,\n 'auto_generate': false,\n 'validate_steps': ['#page_title', ],\n 'friendly_name': 'Main content page'\n },\n '#step-3': {\n 'passed' : false,\n 'auto_generate': false,\n 'validate_steps': ['#page_title', ],\n 'friendly_name': 'Financial assumptions page'\n },\n '#step-4': {\n 'passed' : false,\n 'auto_generate': true,\n 'validate_steps': ['#page_title', '#financial_assumptions'],\n 'friendly_name': 'Financial data input page'\n },\n '#step-5': {\n 'passed' : false,\n 'auto_generate': true,\n 'validate_steps': ['#page_title', '#financial_assumptions', '#financial_data_input'],\n 'friendly_name': 'Reporting page'\n }\n }\"\"\"\n bplan_settings.calendar_months = \"\"\"{\n 'January': {'name': 'January', 'order': 1, 'code': 'Jan', 'next': 'February','previous': 'December' },\n 'February': {'name': 'February', 'order': 2, 'code': 'Feb', 'next': 'March', 'previous': 'January'},\n 'March': {'name': 'March', 'order': 3, 'code': 'Mar', 'next': 'April', 'previous': 'February'},\n 'April': {'name': 'April', 'order': 4, 'code': 'Apr', 'next': 'May', 'previous': 'March'},\n 'May': {'name': 'May', 'order': 5, 'code': 'May', 'next': 'June', 'previous': 'April'},\n 'June': {'name': 'June', 'order': 6, 'code': 'Jun', 'next': 'July', 'previous': 'May'},\n 'July': {'name': 'July', 'order': 7, 'code': 'Jul', 'next': 'August', 'previous': 'June'},\n 'August': {'name': 'August', 'order': 8, 'code': 'Aug', 'next': 'September', 'previous': 'July'},\n 'September': {'name': 'September', 'order': 9, 'code': 'Sep', 'next': 'October', 'previous': 'August'},\n 'October': {'name': 'October', 'order': 10, 'code': 'Oct', 'next': 'November', 'previous': 'September'},\n 'November': {'name': 'November', 'order': 11, 'code': 'Nov', 'next': 'December', 'previous': 'October'},\n 'December': {'name': 'December', 'order': 12, 'code': 'Dec', 'next': 'January', 'previous': 'November'}\n }\"\"\"\n bplan_settings.projection_months_list = \"\"\"{}\"\"\"\n bplan_settings.projection_years = None\n bplan_settings.first_financial_year = 2018\n bplan_settings.last_financial_year = None\n bplan_settings.count_of_months_in_financial_year = 12\n bplan_settings.projection_years_list = \"\"\"[]\"\"\"\n bplan_settings.product_count = None\n bplan_settings.products = \"\"\"{}\"\"\"\n bplan_settings.theme = \"\"\"{}\"\"\"\n bplan_settings.cost_appropriation_methods = \"\"\"['Per Month', 'Per Annum', '% of Revenue', '% of Employee Salary']\"\"\"\n bplan_settings.operating_cost_list = \"\"\"[\n 'Rent and Rates', 'Heat and Light', 'Insurances', 'Marketing/Advertisement', 'Printing & Stationary',\n 'Misc. 
Expenses'\n ]\"\"\"\n bplan_settings.employees_list = \"\"\"[\n 'Director', 'Account Manager', 'Additional Account Manager', 'Coordinator', 'Additional Coordinator',\n 'Quality Control Manager', 'Marketing Officer', 'Receptionist', 'Human Resource Manager', 'Secretary'\n ]\"\"\"\n bplan_settings.capital_sources_list = \"\"\"['Share Capital', 'Debt', 'Annual Interest Rate', 'Loan Period (In months)']\"\"\"\n bplan_settings.tangible_assets_list = \"\"\"['Computers', 'Printers', 'Furniture and Fixtures', 'Office Equipment', 'Fit Outs']\"\"\"\n bplan_settings.intangible_assets_list = \"\"\"['Website Development', 'Patents & Trademarks']\"\"\"\n bplan_settings.deposit_item_list = \"\"\"['Rental Deposits', 'Other Deposits']\"\"\"\n bplan_settings.startup_cost_item_list = \"\"\"[\n 'Legal Expenses', 'Formation Expenses', 'Marketing Costs', 'Utility',\n 'Stationery', 'Business Name Registration Cost'\n ]\"\"\"\n bplan_settings.total_assets = \"\"\"{}\"\"\"\n bplan_settings.total_liabilities = \"\"\"{}\"\"\"\n bplan_settings.tangible_assets_balance_total = \"\"\"{}\"\"\"\n bplan_settings.intangible_assets_balance_total = \"\"\"{}\"\"\"\n bplan_settings.cashFlow_changes_during_the_year_per_month = \"\"\"{}\"\"\"\n bplan_settings.closing_cash_balance_per_month = \"\"\"{}\"\"\"\n bplan_settings.revenue_totals_per_year = \"\"\"{}\"\"\"\n bplan_settings.direct_cost_totals_per_year = \"\"\"{}\"\"\"\n bplan_settings.gross_profit = \"\"\"{}\"\"\"\n bplan_settings.operating_cost_totals_per_year = \"\"\"{}\"\"\"\n bplan_settings.eat = \"\"\"{}\"\"\"\n bplan_settings.net_margin_per_month = \"\"\"{}\"\"\"\n return bplan_settings\n\n# Create your views here.\nclass LandingPageView(View):\n \"\"\"docstring for LandingPageView.\"\"\"\n\n def get(self, request):\n if settings.LANGING_PAGE_ENABLED:\n return render(request, 'index.html', {'name': \"Davies Ray\", 'page': 'landing', 'page_type': 'anonymous'})\n else:\n return redirect('dashboard')\n\n def post(self, request):\n if settings.LANGING_PAGE_ENABLED:\n return render(request, 'index.html', {'name': \"Davies Ray\", 'page': 'landing', 'page_type': 'anonymous'})\n else:\n return redirect('dashboard')\n\nclass RegisterView(View):\n \"\"\"docstring for Register.\"\"\"\n\n def is_null_or_empty(self, val):\n return val is None or val == ''\n\n def get(self, request):\n if request.user.is_authenticated:\n return redirect('dashboard')\n return render(request, 'sign-up.html', {'name': \"Davies Ray\", 'page_type': 'auth'})\n\n\n def post(self, request):\n if request.user.is_authenticated:\n return redirect('dashboard')\n email = request.POST.get('email', None)\n password = request.POST.get('password', None)\n confirm_password = request.POST.get('confirmPasswordInput', None)\n if self.is_null_or_empty(email) or self.is_null_or_empty(password) or self.is_null_or_empty(confirm_password):\n return render(request, 'sign-up.html', {'status': 'ERR', 'message': 'Missing required sign up details.', 'page_type': 'auth'})\n if password != confirm_password:\n return render(request, 'sign-up.html', {'status': 'ERR', 'message': 'Passwords provided do not match.', 'page_type': 'auth'})\n # everything is good so far.\n # create user\n user, created = User.objects.get_or_create(username=email, email=email)\n if not created: #user exists... show message\n return render(request, 'sign-up.html', {'status': 'ERR', 'message': 'User with {} exists. Login or use a different email.'.format(email), 'page_type': 'auth'})\n else: # user created.. 
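set a hashed password.\n # Django's set_password() runs the configured password hasher (PBKDF2 by default),\n # so the plaintext is never stored on the User row; user.password holds only the hash.\n # 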
Set password, remember to use a hashing function\n user.set_password(password) # This line will hash the password\n user.save() #DO NOT FORGET THIS LINE\n return render(request, 'sign-in.html', {'status': 'SUCCESS', 'message': 'Sign in to your account using email: {} and the password used for registration'.format(email), 'page_type': 'auth'})\n\nclass LoginView(View):\n \"\"\"docstring for LandingPageView.\"\"\"\n\n def get(self, request):\n if request.user.is_authenticated:\n return redirect('dashboard')\n return render(request, 'sign-in.html', {'status': 'NEW', 'message': 'Enter email and password to sign in.', 'page_type': 'auth'})\n\n def post(self, request):\n if request.user.is_authenticated:\n return redirect('dashboard')\n # retrieve email and password\n #email = request.data.get('email');\n email = request.POST.get('email', None)\n password = request.POST.get('password', None)\n\n if email is None or password is None: # confirm all details passed\n return render(request, 'sign-in.html', {'status': 'ERR', 'message': 'Missing details', 'page_type': 'auth'})\n\n # check if user matches these details\n user = authenticate(request, username=email, password=password)\n if user is not None:\n login(request, user)\n # Create a corresponding Profile id does not exist\n user_profile_list = Profile.objects.filter(user=user)\n\n if user_profile_list.count() < 1:\n profile = Profile.objects.create(user=user,usage=0.00)\n profile.save()\n return redirect('dashboard')\n # Redirect to a success page.\n else:\n # Return an 'invalid login' error message.\n return render(request, 'sign-in.html', {'status': 'ERR', 'message': 'Invalid username/password.', 'page_type': 'auth'})\n\nclass LogoutView(View):\n \"\"\"docstring for LandingPageView.\"\"\"\n\n def get(self, request):\n logout(request);\n return redirect('login-page')\n\n\n def post(self, request):\n logout(request);\n return redirect('login-page')\n\nclass PasswordResetView(View):\n \"\"\"docstring for LandingPageView.\"\"\"\n\n def get(self, request):\n return render(request, 'reset-password.html', {'name': \"Davies Ray\", 'page_type': 'auth'})\n\n\n def post(self, request):\n return render(request, 'reset-password.html', {'name': \"Post action\", 'page_type': 'auth'})\n\nclass PasswordChangeView(View):\n \"\"\"docstring for LandingPageView.\"\"\"\n\n def get(self, request):\n return render(request, 'index.html', {'name': \"Davies Ray\", 'page_type': 'auth'})\n\n\n def post(self, request):\n return render(request, 'index.html', {'name': \"Post action\", 'page_type': 'auth'})\n\nclass DashboardView(View):\n \"\"\"docstring for DashboardView.\"\"\"\n\n def get(self, request):\n if not request.user.is_authenticated:\n return redirect('login-page')\n # get business plans\n # get current user\n if not request.user.is_authenticated: # check if user is authenticated\n return redirect('login-page');\n bplans = BusinessPlanTitlePage.objects.filter(owner=request.user).order_by('-date_created') # order by date_created desc\n user_profile = Profile.objects.get(user=request.user)\n bplan_samples = BusinessPlanSample.objects.all(); # get's all business plan samples\n return render(request, 'dashboard.html', {'page': 'dashboard', 'user': request.user, 'user_profile': user_profile, 'bplans': bplans, 'bplan_samples': bplan_samples, 'menu_width': 'full' })\n\n def post(self, request):\n if not request.user.is_authenticated: # check if user is authenticated\n return redirect('login-page');\n return render(request, 'dashboard.html', {'page': 'dashboard', 'user': 
request.user})\n\nclass BusinessPlanDetailView(View):\n \"\"\"docstring for BusinessPlanDetailView.\"\"\"\n def get_size(self, obj, seen=None):\n \"\"\"Recursively finds size of objects\"\"\"\n size = sys.getsizeof(obj)\n if seen is None:\n seen = set()\n obj_id = id(obj)\n if obj_id in seen:\n return 0\n # Important mark as seen *before* entering recursion to gracefully handle\n # self-referential objects\n seen.add(obj_id)\n if isinstance(obj, dict):\n size += sum([self.get_size(v, seen) for v in obj.values()])\n size += sum([self.get_size(k, seen) for k in obj.keys()])\n elif hasattr(obj, '__dict__'):\n size += self.get_size(obj.__dict__, seen)\n elif hasattr(obj, '__iter__') and not isinstance(obj, (str, bytes, bytearray)):\n size += sum([self.get_size(i, seen) for i in obj])\n return size\n\n def get(self, request):\n if not request.user.is_authenticated:\n return redirect('login-page')\n id = request.GET.get('id', None)\n mode = 'edit' if id else 'new'\n bplan_title_page = None\n bplan_settings = BusinessPlanTitlePage()\n\n if id is not None:\n # this is a fresh get\n bplan_title_page = BusinessPlanTitlePage.objects.get(id=id)\n bplan_title_page_form = BusinessPlanTitlePageForm(instance=bplan_title_page)\n try:\n bplan_settings = BusinessPlanSettings.objects.get(title_page=bplan_title_page)\n except:\n bplan_settings = compose_default_settings()\n bplan_settings.title_page = bplan_title_page\n bplan_settings.save()\n else:\n bplan_title_page_form = BusinessPlanTitlePageForm()\n bplan_settings = compose_default_settings()\n # Get remaining models and their corresponding forms\n\n\n bplan_main_content_page = None\n bplan_main_content_page_form = None\n try:\n bplan_main_content_page = BusinessPlanMainContent.objects.get(title_page=bplan_title_page)\n bplan_main_content_page_form = BusinessPlanMainContentForm(instance=bplan_main_content_page)\n except:\n bplan_main_content_page = BusinessPlanMainContent()\n bplan_main_content_page_form = BusinessPlanMainContentForm()\n\n\n bplan_financial_assumptions_page = None\n bplan_financial_assumptions_page_form = None\n try:\n bplan_financial_assumptions_page = BusinessPlanFinancialAssumptions.objects.get(title_page=bplan_title_page)\n bplan_financial_assumptions_page_form = BusinessPlanFinancialAssumptionsForm(instance=bplan_financial_assumptions_page)\n except:\n bplan_financial_assumptions_page = BusinessPlanFinancialAssumptions()\n bplan_financial_assumptions_page_form = BusinessPlanFinancialAssumptionsForm()\n\n bplan_financial_data_input_page = None\n bplan_financial_data_input_page_form = None\n try:\n bplan_financial_data_input_page = BusinessPlanFinancialDataInput.objects.get(title_page=bplan_title_page)\n bplan_financial_data_input_page_form = BusinessPlanFinancialDataInputForm(instance=bplan_financial_data_input_page)\n except:\n bplan_financial_data_input_page = BusinessPlanFinancialDataInput()\n bplan_financial_data_input_page_form = BusinessPlanFinancialDataInputForm()\n\n\n currencies = Currency.objects.all()\n months = Month.objects.all()\n bplan_samples = BusinessPlanSample.objects.all(); # get's all business plan samples\n return render(request, 'business-plan.html', {\n 'page': 'bplan',\n 'id': id,\n 'bplan_title_page_form': bplan_title_page_form,\n 'bplan_main_content_page_form': bplan_main_content_page_form,\n 'bplan_financial_assumptions_page_form': bplan_financial_assumptions_page_form,\n 'bplan_financial_data_input_page_form': bplan_financial_data_input_page_form,\n 'bplan_settings': bplan_settings,\n 'currencies': 
currencies,\n 'months': months,\n 'mode': mode,\n 'bplan_samples': bplan_samples,\n 'menu_width': 'full'\n })\n\nclass BusinessPlanDeleteView(View):\n def get(self, request):\n if not request.user.is_authenticated:\n return redirect('login-page')\n # determine if new or edit form\n # check if id is set\n id = request.GET.get('id', None)\n business_plan_title_page = None\n try:\n business_plan_title_page = BusinessPlanTitlePage.objects.get(id=id)\n except:\n # unable to get business plan with id. Redirect to dashboard\n return redirect('dashboard');\n\n try:\n BusinessPlanMainContent.objects.get(title_page=business_plan_title_page).delete()\n except:\n pass\n\n try:\n BusinessPlanFinancialAssumptions.objects.get(title_page=business_plan_title_page).delete()\n except:\n pass\n\n try:\n BusinessPlanFinancialDataInput.objects.get(title_page=business_plan_title_page).delete()\n except:\n pass\n\n try:\n BusinessPlanSettings.objects.get(title_page=business_plan_title_page).delete()\n except:\n pass\n\n try:\n # adjust usage size\n profile = Profile.objects.get(user=request.user)\n profile.usage -= business_plan_title_page.bplan_size\n profile.save()\n business_plan_title_page.delete()\n except Exception as err:\n pass\n\n return redirect('dashboard');\n\ndef save_title_page(request):\n if not request.user.is_authenticated:\n return JsonResponse({'status':401, 'message': 'Authentication Error'})\n if request.method == 'GET':\n return JsonResponse({'status':500, 'message': 'Save action does not allow GET'})\n bplanner_id = request.POST.get('id', None) # Be careful about this while doing a post!!\n if bplanner_id is not None and bplanner_id != '':\n # Update\n bplan = BusinessPlanTitlePage.objects.get(id=bplanner_id)\n form = BusinessPlanTitlePageForm(request.POST, request.FILES, instance=bplan)\n if form.is_valid():\n model_instance = form.save(commit=False)\n model_instance.owner = request.user\n model_instance.date_modified = timezone.datetime.now();\n bplan_size = get_size(model_instance)\n model_instance.bplan_size = bplan_size - bplan.size # Overall change in size in Mbs # round to 2 dps..\n model_instance.size = bplan_size\n model_instance.save()\n # get model size\n # update profile usage size\n user_profile = Profile.objects.get(user=request.user)\n user_profile.usage += bplan_size - bplan.size\n user_profile.save()\n\n # form.save();\n return JsonResponse({'status': 200, 'message': 'Title page updated!', 'id': model_instance.id})\n else:\n pass\n return JsonResponse({'status': 500, 'message': 'An error occurred while updating Business plan. Please try again or contact system admin.'})\n else:\n form = BusinessPlanTitlePageForm(request.POST, request.FILES)\n if form.is_valid():\n model_instance = form.save(commit=False)\n model_instance.owner = request.user\n model_instance.date_created = timezone.datetime.now();\n bplan_size = get_size(model_instance)\n model_instance.bplan_size += (bplan_size - model_instance.size)\n\n model_instance.size = bplan_size\n print(\"After printing new\")\n model_instance.save()\n # update profile usage size\n user_profile = Profile.objects.get(user=request.user)\n user_profile.usage += bplan_size\n user_profile.save()\n return JsonResponse({'status': 200, 'message': 'Title page save!', 'id': model_instance.id})\n return JsonResponse({'status': 500, 'message': 'An error occurred while creating Business plan. 
Please try again or contact system admin.'})\n\ndef save_main_content_page(request):\n if not request.user.is_authenticated:\n return JsonResponse({'status':401, 'message': 'Authentication Error'})\n\n if request.method == 'GET':\n return JsonResponse({'status':500, 'message': 'Save action does not allow GET'})\n\n # get business plan id:- you cannot proceed if this is not retrieved\n title_page_id = request.POST.get('title_page_id', None)\n if title_page_id is None or title_page_id == '':\n return JsonResponse({'status': 500, 'message': 'An error occurred while updating Business plan. Please try again or contact system admin.'})\n title_page = BusinessPlanTitlePage.objects.filter(id=title_page_id).first()\n content_page = None\n try:\n content_page = BusinessPlanMainContent.objects.get(title_page_id=title_page_id)\n except Exception as err:\n content_page = None\n\n if content_page is not None:\n form = BusinessPlanMainContentForm(request.POST, files=None, instance=content_page)\n if form.is_valid():\n model_instance = form.save(commit=False)\n model_instance.title_page = title_page\n model_instance.owner = request.user\n model_instance.date_modified = timezone.datetime.now();\n content_page_size = get_size(model_instance)\n\n # update title page size\n title_page.bplan_size += content_page_size - content_page.size # Overall change in size in Mbs # round to 2 dps..\n title_page.save()\n # update profile usage size\n user_profile = Profile.objects.get(user=request.user)\n user_profile.usage += content_page_size - content_page.size\n user_profile.save()\n\n model_instance.size = content_page_size\n model_instance.save()\n\n # get model size\n\n return JsonResponse({'status': 200, 'message': 'Main content updated successfully!', 'id': model_instance.id})\n return JsonResponse({'status': 500, 'message': 'An error occurred while updating Business plan. Please try again or contact system admin.'})\n else:\n form = BusinessPlanMainContentForm(request.POST)\n if form.is_valid():\n model_instance = form.save(commit=False)\n model_instance.title_page = title_page\n model_instance.owner = request.user\n model_instance.date_created = timezone.datetime.now();\n content_page_size = get_size(model_instance)\n model_instance.size = content_page_size\n model_instance.save()\n title_page.bplan_size += content_page_size\n # update profile usage size\n user_profile = Profile.objects.get(user=request.user)\n user_profile.usage += content_page_size\n user_profile.save()\n return JsonResponse({'status': 200, 'message': 'Main content saved successfully!', 'id': model_instance.id})\n return JsonResponse({'status': 500, 'message': 'An error occurred while creating Business plan. Please try again or contact system admin.'})\n\ndef save_financial_assumptions_page(request):\n if not request.user.is_authenticated:\n return JsonResponse({'status':401, 'message': 'Authentication Error'})\n\n if request.method == 'GET':\n return JsonResponse({'status':500, 'message': 'Save action does not allow GET'})\n\n # get business plan id:- you cannot proceed if this is not retrieved\n title_page_id = request.POST.get('title_page_id', None)\n if title_page_id is None or title_page_id == '':\n return JsonResponse({'status': 500, 'message': 'An error occurred while updating Business plan. 
Please try again or contact system admin.'})\n title_page = BusinessPlanTitlePage.objects.get(id=title_page_id)\n try:\n assumptions_page = BusinessPlanFinancialAssumptions.objects.get(title_page=title_page)\n except:\n assumptions_page = None\n\n if assumptions_page is not None:\n form = BusinessPlanFinancialAssumptionsForm(request.POST, files=None, instance=assumptions_page)\n if form.is_valid():\n model_instance = form.save(commit=False)\n model_instance.title_page = title_page\n model_instance.owner = request.user\n model_instance.date_modified = timezone.datetime.now();\n assumptions_page_size = get_size(model_instance)\n\n title_page.bplan_size += assumptions_page_size - assumptions_page.size\n title_page.save()\n\n # update profile usage size\n user_profile = Profile.objects.get(user=request.user)\n user_profile.usage += assumptions_page_size - assumptions_page.size\n user_profile.save()\n\n model_instance.size = assumptions_page_size\n model_instance.save()\n\n\n # get model size\n\n return JsonResponse({'status': 200, 'message': 'Financial assumptions data updated successfully!', 'id': model_instance.id})\n else:\n print('Invalid: errors')\n print(form.errors)\n return JsonResponse({'status': 500, 'message': 'An error occurred while updating Business plan. Please try again or contact system admin.'})\n else:\n form = BusinessPlanFinancialAssumptionsForm(request.POST)\n if form.is_valid():\n model_instance = form.save(commit=False)\n model_instance.title_page = title_page\n model_instance.owner = request.user\n model_instance.date_created = timezone.datetime.now();\n assumptions_page_size = get_size(model_instance)\n\n title_page.bplan_size += assumptions_page_size\n title_page.save()\n\n # update profile usage size\n user_profile = Profile.objects.get(user=request.user)\n user_profile.usage += assumptions_page_size\n user_profile.save()\n\n model_instance.size = assumptions_page_size\n model_instance.save()\n return JsonResponse({'status': 200, 'message': 'Financial assumptions data saved successfully!', 'id': model_instance.id})\n else:\n print('Errors')\n print(form.errors)\n return JsonResponse({'status': 500, 'message': 'An error occurred while creating Business plan. Please try again or contact system admin.'})\n\ndef save_financial_data_input_page(request):\n if not request.user.is_authenticated:\n return JsonResponse({'status':401, 'message': 'Authentication Error'})\n\n if request.method == 'GET':\n return JsonResponse({'status':500, 'message': 'Save action does not allow GET'})\n\n # get business plan id:- you cannot proceed if this is not retrieved\n title_page_id = request.POST.get('title_page_id', None)\n\n if title_page_id is None or title_page_id == '':\n return JsonResponse({'status': 500, 'message': 'An error occurred while updating Business plan. 
Please try again or contact system admin.'})\n title_page = BusinessPlanTitlePage.objects.filter(id=title_page_id).first()\n\n try:\n data_input_page = BusinessPlanFinancialDataInput.objects.get(title_page=title_page)\n except:\n data_input_page = None\n if data_input_page is not None:\n # Update\n form = BusinessPlanFinancialDataInputForm(request.POST, files=None, instance=data_input_page)\n if form.is_valid():\n model_instance = form.save(commit=False)\n model_instance.title_page = title_page\n model_instance.owner = request.user\n model_instance.date_modified = timezone.datetime.now();\n data_input_page_size = get_size(model_instance)\n\n title_page.bplan_size += data_input_page_size - data_input_page.size # Overall change in size in Mbs # round to 2 dps..\n title_page.save()\n\n # update profile usage size\n user_profile = Profile.objects.get(user=request.user)\n user_profile.usage += data_input_page_size - data_input_page.size\n user_profile.save()\n\n model_instance.size = data_input_page_size\n model_instance.save()\n # get model size\n return JsonResponse({'status': 200, 'message': 'Business plan updated successfully!', 'id': model_instance.id})\n else:\n print('BusinessPlanFinancialDataInputForm Errors')\n print(form.errors)\n return JsonResponse({'status': 500, 'message': 'An error occurred while updating Business plan. Please try again or contact system admin.'})\n else:\n form = BusinessPlanFinancialDataInputForm(request.POST)\n if form.is_valid():\n model_instance = form.save(commit=False)\n model_instance.title_page = title_page\n model_instance.owner = request.user\n model_instance.date_created = timezone.datetime.now();\n data_input_page_size = get_size(model_instance)\n\n title_page.bplan_size += data_input_page_size\n title_page.save()\n\n model_instance.size = data_input_page_size\n model_instance.save()\n\n # update profile usage size\n user_profile = Profile.objects.get(user=request.user)\n user_profile.usage += data_input_page_size\n user_profile.save()\n return JsonResponse({'status': 200, 'message': 'Business plan created successfully!', 'id': model_instance.id})\n return JsonResponse({'status': 500, 'message': 'An error occurred while creating Business plan. Please try again or contact system admin.'})\n\ndef save_bplanner_settings(request):\n if request.method == 'GET':\n return JsonResponse({'status':500, 'message': 'Save action does not allow GET'})\n\n # get business plan id:- you cannot proceed if this is not retrieved\n title_page_id = request.POST.get('title_page_id', None)\n if title_page_id is None or title_page_id == '':\n return JsonResponse({'status': 500, 'message': 'An error occurred while updating Business plan. 
Please try again or contact system admin.'})\n title_page = BusinessPlanTitlePage.objects.filter(id=title_page_id).first()\n\n month_initiated = request.POST.get('month_list_initiated', False) in ['true', '1']\n year_initiated = request.POST.get('year_list_initiated', False) in ['true', '1']\n\n try:\n bplanner_settings = BusinessPlanSettings.objects.get(title_page=title_page)\n except:\n bplanner_settings = None\n if bplanner_settings is not None :\n form = BusinessPlanSettingsForm(request.POST, files=None, instance=bplanner_settings)\n if form.is_valid():\n model_instance = form.save(commit=False)\n model_instance.title_page = title_page\n try:\n model_instance.month_list_initiated = month_initiated\n model_instance.year_list_initiated = year_initiated\n except Exception as err:\n print(err)\n model_instance.save()\n return JsonResponse({'status': 200, 'message': 'Business plan updated successfully!', 'id': model_instance.id})\n else:\n print(form.errors)\n return JsonResponse({'status': 500, 'message': 'An error occurred while updating Business plan. Please try again or contact system admin.'})\n else:\n form = BusinessPlanSettingsForm(request.POST)\n if form.is_valid():\n model_instance = form.save(commit=False)\n model_instance.title_page = title_page\n model_instance.month_list_initiated = month_initiated\n model_instance.year_list_initiated = year_initiated\n model_instance.save()\n return JsonResponse({'status': 200, 'message': 'Business plan created successfully!', 'id': model_instance.id})\n return JsonResponse({'status': 500, 'message': 'An error occurred while creating Business plan. Please try again or contact system admin.'})\n\nclass BusinessPlanHelpView(View):\n \"\"\"docstring for BusinessPlanHelpView.\"\"\"\n\n def get(self, request):\n if not request.user.is_authenticated:\n return redirect('login-page')\n section = request.GET.get('section', None)\n # get find help model\n help_section = HelpSection.objects.get(ref_id=section) if section is not None else HelpSection.objects.first()\n bplan_samples = BusinessPlanSample.objects.all(); # get's all business plan samples\n return render(request, 'help.html', {'page': 'guide', 'help_section': help_section, 'bplan_samples': bplan_samples, 'menu_width': 'fluid'})\n\n\n def post(self, request):\n if not request.user.is_authenticated:\n return redirect('login-page')\n bplan_samples = BusinessPlanSample.objects.all(); # get's all business plan samples\n return render(request, 'help.html', {'page': 'guide', 'name': \"Post action\", 'bplan_samples': bplan_samples, 'menu_width': 'fluid'})\n\ndef view_bplan(request):\n if not request.user.is_authenticated:\n return redirect('login-page')\n\n if request.method == 'POST':\n return JsonResponse({'status':500, 'message': 'Get action does not allow POST'})\n # check if sample\n sample = request.GET.get('sample', None)\n sample_id = request.GET.get('id', None)\n if sample_id == None:\n return JsonResponse({'status': 500, 'message': 'Could not process request...'})\n\n bplan_samples = BusinessPlanSample.objects.all(); # get's all business plan samples\n # we have a valid id. 
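decide between a read-only sample and the user's own plan.\n # When 'sample' is truthy we render a BusinessPlanSample; otherwise we fetch the user's\n # BusinessPlanTitlePage plus its main content, which may not exist yet for a new plan.\n # 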
get sample\n if sample:\n sample = BusinessPlanSample.objects.get(id=sample_id);\n bplan_main_content_page = BusinessPlanMainContent.objects.get(title_page=sample.title_page)\n # data = jsonpickle.encode(sample)\n return render(request, 'view-business-plan.html', {'view': True, 'sample': sample, 'bplan_main_content_page': bplan_main_content_page, 'title_page': sample.title_page})\n else:\n # actual business plan\n title_page = BusinessPlanTitlePage.objects.get(id=sample_id)\n try:\n bplan_main_content_page = BusinessPlanMainContent.objects.get(title_page=title_page)\n except:\n # data = jsonpickle.encode(sample)\n bplan_main_content_page = None\n return render(request, 'view-business-plan.html', {'bplan_samples': bplan_samples, 'view': True, 'sample': None, 'bplan_main_content_page': bplan_main_content_page, 'title_page': title_page})\n\ndef link_callback(uri, rel):\n # use short variable names\n sUrl = settings.STATIC_URL # Typically /static/\n sRoot = settings.STATIC_ROOT # Typically /home/userX/project_static/\n mUrl = settings.MEDIA_URL # Typically /static/media/\n mRoot = settings.MEDIA_ROOT # Typically /home/userX/project_static/media/\n\n # convert URIs to absolute system paths\n if uri.startswith(mUrl):\n path = os.path.join(mRoot, uri.replace(mUrl, \"\"))\n elif uri.startswith(sUrl):\n path = os.path.join(sRoot, uri.replace(sUrl, \"\"))\n else:\n # not a STATIC/MEDIA URI (e.g. an absolute URL): hand it back to xhtml2pdf unchanged\n return uri\n\n # make sure that file exists\n if not os.path.isfile(path):\n raise Exception('media URI must start with %s or %s' % (sUrl, mUrl))\n return path\n\ndef download_pdf(request):\n if not request.user.is_authenticated:\n return redirect('login-page')\n\n if request.method == 'POST':\n return JsonResponse({'status':500, 'message': 'Get action does not allow POST'})\n bplan_id = request.GET.get('id', None)\n if bplan_id is None:\n return JsonResponse({'status': 500, 'message': 'Could not process request...'})\n\n # we have a valid id. 
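look up the plan and render it to PDF.\n # The download template is rendered to an HTML string and piped through xhtml2pdf's\n # pisaDocument, which writes the generated PDF into a BytesIO buffer; on success the raw\n # bytes are returned with an application/pdf content type.\n # 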
get sample\n bplan = BusinessPlanTitlePage.objects.get(id=bplan_id);\n try:\n bplan_main_content_page = BusinessPlanMainContent.objects.get(title_page=bplan)\n except:\n bplan_main_content_page = BusinessPlanMainContent() # create a new instance\n # data = jsonpickle.encode(sample)\n context = {'view': False, 'sample': None, 'bplan_main_content_page': bplan_main_content_page, 'title_page': bplan}\n\n template = get_template('download-template.html')\n html = template.render(context)\n # print(html); # print pred before\n # return HttpResponse(html);\n response = BytesIO()\n pdf = pisa.pisaDocument(BytesIO(html.encode(\"UTF-8\")), response)\n if not pdf.err:\n return HttpResponse(response.getvalue(), content_type='application/pdf')\n else:\n return HttpResponse(\"Error Rendering PDF\", status=400)\n\n # return render(request, 'sample-business-plan.html', )\n\n" }, { "alpha_fraction": 0.5281583070755005, "alphanum_fraction": 0.7123287916183472, "avg_line_length": 17.25, "blob_id": "8381ac982cdc8f0d4f6349954ce10f7d96b7fdd9", "content_id": "cf0ae6bc86f04e0bad3d16c96ca0203f00ce6dec", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 657, "license_type": "no_license", "max_line_length": 28, "num_lines": 36, "path": "/requirements.txt", "repo_name": "daviesray-ornyx/bplanner", "src_encoding": "UTF-8", "text": "awsebcli==3.14.4\nbotocore==1.11.6\ncement==2.8.2\ncertifi==2018.4.16\nchardet==3.0.4\ncolorama==0.3.9\ndefusedxml==0.5.0\nDjango==2.0.7\ndjango-allauth==0.36.0\ndjango-filter==1.1.0\ndjango-rest-auth==0.9.3\ndjango-summernote==0.8.8.7\ndjango-webpack-loader==0.6.0\ndjango-widget-tweaks==1.4.2\ndjangorestframework==3.8.2\ndocutils==0.14\net-xmlfile==1.0.1\nidna==2.7\njdcal==1.4\njmespath==0.9.3\nmysqlclient==1.3.13\noauthlib==2.1.0\nopenpyxl==2.5.5\npathspec==0.5.5\nPillow==5.2.0\npython-dateutil==2.7.3\npython3-openid==3.1.0\npytz==2018.5\nPyYAML==3.13\nrequests==2.19.1\nrequests-oauthlib==1.0.0\nsemantic-version==2.5.0\nsix==1.11.0\ntabulate==0.7.5\ntermcolor==1.1.0\nurllib3==1.23\n" }, { "alpha_fraction": 0.3571428656578064, "alphanum_fraction": 0.523809552192688, "avg_line_length": 12.666666984558105, "blob_id": "15562aa83876566146599366c9fd8c659656c5f8", "content_id": "51915ca757199cb3c3224ca3a08665c762de7e26", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 42, "license_type": "no_license", "max_line_length": 32, "num_lines": 3, "path": "/static/js/style-summernote-widget.js", "repo_name": "daviesray-ornyx/bplanner", "src_encoding": "UTF-8", "text": "/**\n * Created by user on 7/26/2018.\n */\n\n" }, { "alpha_fraction": 0.5401069521903992, "alphanum_fraction": 0.5815507769584656, "avg_line_length": 25.714284896850586, "blob_id": "6403b1a74873e1e2c71e5cac37d5ad4a62ad9091", "content_id": "37d641310504ccaa0e8f57bea3f6ccaf308cee3b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 748, "license_type": "no_license", "max_line_length": 48, "num_lines": 28, "path": "/bplanner/migrations/0004_auto_20180910_1227.py", "repo_name": "daviesray-ornyx/bplanner", "src_encoding": "UTF-8", "text": "# Generated by Django 2.0.7 on 2018-09-10 09:27\n\nfrom django.db import migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('bplanner', '0003_auto_20180910_1213'),\n ]\n\n operations = [\n migrations.RenameField(\n model_name='businessplansettings',\n old_name='calendarMonths',\n new_name='calendar_months',\n 
),\n migrations.RenameField(\n model_name='businessplansettings',\n old_name='projectionMonthsList',\n new_name='projection_months_list',\n ),\n migrations.RenameField(\n model_name='businessplansettings',\n old_name='projectionYears',\n new_name='projection_years',\n ),\n ]\n" }, { "alpha_fraction": 0.5279069542884827, "alphanum_fraction": 0.6023255586624146, "avg_line_length": 22.88888931274414, "blob_id": "6c7f09902d9bbb8d43e3f7df1897352dd581cd6c", "content_id": "c0f58bcfcd6582207252ed7aa6f3e750f3cd7dbf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 430, "license_type": "no_license", "max_line_length": 91, "num_lines": 18, "path": "/bplanner/migrations-bak/0024_businessplan_size.py", "repo_name": "daviesray-ornyx/bplanner", "src_encoding": "UTF-8", "text": "# Generated by Django 2.0.7 on 2018-09-03 10:09\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('bplanner', '0023_auto_20180903_0003'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='businessplan',\n name='size',\n field=models.FloatField(blank=True, default=0, null=True, verbose_name='Size'),\n ),\n ]\n" }, { "alpha_fraction": 0.5444468855857849, "alphanum_fraction": 0.5529232621192932, "avg_line_length": 33.85606002807617, "blob_id": "22312a17a1903e34fd37ff69137d2690fda3e015", "content_id": "a0c03bf96581e04211cb48740078bf268f5c4c07", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4601, "license_type": "no_license", "max_line_length": 127, "num_lines": 132, "path": "/bplanner/migrations-bak/0020_auto_20180902_1352.py", "repo_name": "daviesray-ornyx/bplanner", "src_encoding": "UTF-8", "text": "# Generated by Django 2.0.7 on 2018-09-02 10:52\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('bplanner', '0019_month'),\n ]\n\n operations = [\n migrations.RemoveField(\n model_name='businessplan',\n name='company_description_page',\n ),\n migrations.RemoveField(\n model_name='businessplan',\n name='executive_summary_page',\n ),\n migrations.RemoveField(\n model_name='businessplan',\n name='finance_page',\n ),\n migrations.RemoveField(\n model_name='businessplan',\n name='industry_analysis_page',\n ),\n migrations.RemoveField(\n model_name='businessplan',\n name='insights_page',\n ),\n migrations.RemoveField(\n model_name='businessplan',\n name='key_success_factors_page',\n ),\n migrations.RemoveField(\n model_name='businessplan',\n name='marketing_plan_page',\n ),\n migrations.RemoveField(\n model_name='businessplan',\n name='milestones_page',\n ),\n migrations.RemoveField(\n model_name='businessplan',\n name='mission_vision_page',\n ),\n migrations.RemoveField(\n model_name='businessplan',\n name='name',\n ),\n migrations.RemoveField(\n model_name='businessplan',\n name='objectives_page',\n ),\n migrations.RemoveField(\n model_name='businessplan',\n name='ownership_and_management_plan_page',\n ),\n migrations.RemoveField(\n model_name='businessplan',\n name='references_page',\n ),\n migrations.RemoveField(\n model_name='businessplan',\n name='swot_analysis_page',\n ),\n migrations.RemoveField(\n model_name='businessplan',\n name='tam_sam_som_analysis_page',\n ),\n migrations.RemoveField(\n model_name='businessplan',\n name='title_page',\n ),\n migrations.AddField(\n model_name='businessplan',\n name='address',\n field=models.CharField(blank=True, default='', max_length=500, 
null=True, verbose_name='Business address'),\n ),\n migrations.AddField(\n model_name='businessplan',\n name='company_name',\n field=models.CharField(blank=True, default='Missing Name', max_length=500, null=True, verbose_name='Company Name'),\n ),\n migrations.AddField(\n model_name='businessplan',\n name='email',\n field=models.CharField(blank=True, default='', max_length=500, null=True, verbose_name='Email'),\n ),\n migrations.AddField(\n model_name='businessplan',\n name='financial_assumptions',\n field=models.TextField(blank=True, default='', null=True, verbose_name='Financial Assumptions'),\n ),\n migrations.AddField(\n model_name='businessplan',\n name='financial_input',\n field=models.TextField(blank=True, default='', null=True, verbose_name='Financial Input'),\n ),\n migrations.AddField(\n model_name='businessplan',\n name='logo',\n field=models.ImageField(default='logo_default.png', upload_to='imgs/', verbose_name='Logo'),\n ),\n migrations.AddField(\n model_name='businessplan',\n name='main_content',\n field=models.TextField(blank=True, default='', null=True, verbose_name='Main Content'),\n ),\n migrations.AddField(\n model_name='businessplan',\n name='phone_number',\n field=models.CharField(blank=True, default='', max_length=15, null=True, verbose_name='Phone Number'),\n ),\n migrations.AddField(\n model_name='businessplan',\n name='presented_to',\n field=models.CharField(blank=True, default='', max_length=500, null=True, verbose_name='Presented to'),\n ),\n migrations.AddField(\n model_name='businessplan',\n name='tagline',\n field=models.CharField(blank=True, default='', max_length=500, null=True, verbose_name='Tagline'),\n ),\n migrations.AddField(\n model_name='businessplan',\n name='website',\n field=models.CharField(blank=True, default='', max_length=500, null=True, verbose_name='Website'),\n ),\n ]\n" }, { "alpha_fraction": 0.5286040902137756, "alphanum_fraction": 0.6064073443412781, "avg_line_length": 23.27777862548828, "blob_id": "61dc77a0bc4d196c96c2c0e187f64575c7dc572e", "content_id": "cb061282f4b2ea2591a0707bcf7fc0365bc098ef", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 437, "license_type": "no_license", "max_line_length": 96, "num_lines": 18, "path": "/bplanner/migrations-bak/0017_auto_20180726_0939.py", "repo_name": "daviesray-ornyx/bplanner", "src_encoding": "UTF-8", "text": "# Generated by Django 2.0.7 on 2018-07-26 08:39\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('bplanner', '0016_auto_20180725_2108'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='businessplan',\n name='name',\n field=models.CharField(default='Missing Name', max_length=500, verbose_name='Name'),\n ),\n ]\n" }, { "alpha_fraction": 0.5618776679039001, "alphanum_fraction": 0.5789473652839661, "avg_line_length": 38.05555725097656, "blob_id": "453d8002f13fa4349322c49c5872f11f80c301e5", "content_id": "58face1bc2b66bc7d0946e47e72b8adc3d857efb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1406, "license_type": "no_license", "max_line_length": 143, "num_lines": 36, "path": "/bplanner/migrations-bak/0001_initial.py", "repo_name": "daviesray-ornyx/bplanner", "src_encoding": "UTF-8", "text": "# Generated by Django 2.0.7 on 2018-07-23 16:15\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n ]\n\n operations = [\n 
migrations.CreateModel(\n name='Help',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('title', models.CharField(blank=True, default='', max_length=250, null=True, verbose_name='Help Titles')),\n ('description', models.TextField(blank=True, default='', null=True, verbose_name='Description')),\n ],\n ),\n migrations.CreateModel(\n name='HelpSubSection',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('title', models.CharField(blank=True, default='', max_length=250, null=True, verbose_name='Help Sub-Section Titles')),\n ],\n ),\n migrations.CreateModel(\n name='HelpSubSectionExample',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('title', models.CharField(blank=True, default='', max_length=250, null=True, verbose_name='Help Sub-Section Example Titles')),\n ],\n ),\n ]\n" }, { "alpha_fraction": 0.5752361416816711, "alphanum_fraction": 0.5835165977478027, "avg_line_length": 60.341270446777344, "blob_id": "bb200f500c89478ce5f0963c369ecc5c0137593b", "content_id": "f786e54f318f0d541313ccbbf31c4531f59dee23", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7729, "license_type": "no_license", "max_line_length": 233, "num_lines": 126, "path": "/bplanner/migrations/0001_initial.py", "repo_name": "daviesray-ornyx/bplanner", "src_encoding": "UTF-8", "text": "# Generated by Django 2.0.7 on 2018-09-03 13:21\n\nfrom django.conf import settings\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n    initial = True\n\n    dependencies = [\n        migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n    ]\n\n    operations = [\n        migrations.CreateModel(\n            name='BusinessPlan',\n            fields=[\n                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n                ('company_name', models.CharField(default='', max_length=500, verbose_name='Company, Business, or Project name')),\n                ('tagline', models.CharField(blank=True, default='', max_length=500, null=True, verbose_name='Tagline')),\n                ('address', models.CharField(blank=True, default='', max_length=500, null=True, verbose_name='Business address')),\n                ('phone_number', models.CharField(blank=True, default='', max_length=15, null=True, verbose_name='Phone Number')),\n                ('email', models.CharField(blank=True, default='', max_length=500, null=True, verbose_name='Email')),\n                ('website', models.CharField(blank=True, default='', max_length=500, null=True, verbose_name='Website')),\n                ('presented_to', models.CharField(blank=True, default='', max_length=500, null=True, verbose_name='Presented to')),\n                ('logo', models.ImageField(default='logo_default.png', null=True, upload_to='imgs/', verbose_name='Logo')),\n                ('main_content', models.TextField(blank=True, default='', null=True, verbose_name='Main Content')),\n                ('financial_assumptions', models.TextField(blank=True, default='', null=True, verbose_name='Financial Assumptions')),\n                ('financial_input', models.TextField(blank=True, default='', null=True, verbose_name='Financial Input')),\n                ('rpt_pnl', models.TextField(blank=True, default='', null=True, verbose_name='Monthly P&L')),\n                ('rpt_amortization', models.TextField(blank=True, default='', null=True, verbose_name='Monthly Amortization')),\n                ('rpt_cash_flow', models.TextField(blank=True, default='', null=True, verbose_name='Cash Flow')),\n 
('rpt_balance_sheet', models.TextField(blank=True, default='', null=True, verbose_name='Balance Sheet')),\n ('rpt_dashboard', models.TextField(blank=True, default='', null=True, verbose_name='Dashboard')),\n ('size', models.FloatField(blank=True, default=0, null=True, verbose_name='Size')),\n ('date_created', models.DateTimeField(blank=True, null=True, verbose_name='Date Created')),\n ('date_modified', models.DateTimeField(blank=True, null=True, verbose_name='Date Modified')),\n ('owner', models.ForeignKey(null=True, on_delete=django.db.models.deletion.DO_NOTHING, to=settings.AUTH_USER_MODEL, verbose_name='Business Plan Owner')),\n ],\n options={\n 'verbose_name': 'Business Plan',\n 'verbose_name_plural': 'Business Plans',\n 'ordering': ['-date_modified', '-date_created'],\n },\n ),\n migrations.CreateModel(\n name='Currency',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('code', models.CharField(blank=True, default='', max_length=150, null=True, verbose_name='Codes')),\n ('full_name', models.CharField(blank=True, default='', max_length=250, null=True)),\n ('rate_to_dollar', models.FloatField(blank=True, default=0.0, null=True, verbose_name='Rate to Dollar')),\n ],\n options={\n 'verbose_name': 'Currency',\n 'verbose_name_plural': 'Currencies',\n },\n ),\n migrations.CreateModel(\n name='HelpSection',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('ref_id', models.CharField(blank=True, default='', max_length=250, null=True, verbose_name='Ref Id')),\n ('title', models.CharField(blank=True, default='', max_length=250, null=True, verbose_name='Help Section Title')),\n ('description', models.TextField(blank=True, default='', null=True, verbose_name='Description')),\n ],\n options={\n 'verbose_name': 'Help Section',\n 'verbose_name_plural': 'Help Sections',\n 'ordering': ['ref_id'],\n },\n ),\n migrations.CreateModel(\n name='HelpSubSection',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('title', models.CharField(blank=True, default='', max_length=250, null=True, verbose_name='Help Sub-Section Title')),\n ('instruction', models.TextField(blank=True, default='', null=True, verbose_name='Instruction')),\n ('help_section', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='rel_help_sections', to='bplanner.HelpSection', verbose_name='Help Section')),\n ],\n options={\n 'verbose_name': 'Help Sub-Section',\n 'verbose_name_plural': 'Help Sub-Sections',\n },\n ),\n migrations.CreateModel(\n name='HelpSubSectionExample',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('title', models.CharField(blank=True, default='', max_length=250, null=True, verbose_name='Example Title')),\n ('example', models.TextField(blank=True, default='', null=True, verbose_name='Example')),\n ('help_sub_section', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='rel_help_sub_section_examples', to='bplanner.HelpSubSection', verbose_name='Help Sub-Section')),\n ],\n options={\n 'verbose_name': 'Help Sub-Section Example',\n 'verbose_name_plural': 'Help Sub-Section Examples',\n },\n ),\n migrations.CreateModel(\n name='Month',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('code', 
models.CharField(blank=True, default='', max_length=150, null=True, verbose_name='Codes')),\n ('full_name', models.CharField(blank=True, default='', max_length=250, null=True)),\n ('order', models.FloatField(blank=True, default=0, null=True, verbose_name='Month order')),\n ],\n options={\n 'verbose_name': 'Month',\n 'verbose_name_plural': 'Months',\n },\n ),\n migrations.CreateModel(\n name='Profile',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('usage', models.FloatField(blank=True, default=0, null=True, verbose_name='Space Used')),\n ('user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='user_profile', to=settings.AUTH_USER_MODEL, verbose_name='User')),\n ],\n options={\n 'verbose_name': 'User Profile',\n 'verbose_name_plural': 'User Profiles',\n },\n ),\n ]\n" }, { "alpha_fraction": 0.592989981174469, "alphanum_fraction": 0.6130185723304749, "avg_line_length": 42.6875, "blob_id": "cb79dd6359c7d1d8f45b3443f826135ebddd14c2", "content_id": "6045c156bdfe54fecae1be1a1fcba0b7f2ac9293", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1398, "license_type": "no_license", "max_line_length": 184, "num_lines": 32, "path": "/bplanner/migrations/0010_auto_20181001_1534.py", "repo_name": "daviesray-ornyx/bplanner", "src_encoding": "UTF-8", "text": "# Generated by Django 2.0.7 on 2018-10-01 12:34\n\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('bplanner', '0009_businessplanmaincontent_title_page'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='BusinessPlanSample',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('display_name', models.CharField(blank=True, max_length=500, null=True, verbose_name='Display Name')),\n ('business_types', models.IntegerField(choices=[(0, 'Non-Profit'), (1, 'For Profit')], default=1, verbose_name='Business Type')),\n ('title_page', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='bplanner.BusinessPlanTitlePage', verbose_name='Business Plan Title Page')),\n ],\n options={\n 'verbose_name': 'Business Plan Sample',\n 'verbose_name_plural': 'Business Plans Samples',\n },\n ),\n migrations.AlterField(\n model_name='businessplanfinancialassumptions',\n name='taxation_system',\n field=models.IntegerField(choices=[(0, 'Tiered System'), (1, 'Single System')], default=0, verbose_name='Taxation System'),\n ),\n ]\n" }, { "alpha_fraction": 0.5204300880432129, "alphanum_fraction": 0.5623655915260315, "avg_line_length": 34.769229888916016, "blob_id": "8328b66f085b5d9f0624b9aa0cf8b6aae34c9e34", "content_id": "14ff458a67d917b77f2897634ec48198a1d16df7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 930, "license_type": "no_license", "max_line_length": 121, "num_lines": 26, "path": "/bplanner/migrations-bak/0018_currency.py", "repo_name": "daviesray-ornyx/bplanner", "src_encoding": "UTF-8", "text": "# Generated by Django 2.0.7 on 2018-08-02 10:58\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('bplanner', '0017_auto_20180726_0939'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Currency',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, 
verbose_name='ID')),\n ('code', models.CharField(blank=True, default='', max_length=150, null=True, verbose_name='Codes')),\n ('full_name', models.CharField(blank=True, default='', max_length=250, null=True)),\n ('rate_to_dollar', models.FloatField(blank=True, default=0.0, null=True, verbose_name='Rate to Dollar')),\n ],\n options={\n 'verbose_name': 'Currency',\n 'verbose_name_plural': 'Currencies',\n },\n ),\n ]\n" }, { "alpha_fraction": 0.5954774022102356, "alphanum_fraction": 0.6381909251213074, "avg_line_length": 32.16666793823242, "blob_id": "dcaf92aadd6612edcb40a4adc10cdc8d99fd9a17", "content_id": "b09f23a5b8f8a3896eea803899c07a5a741f386f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 796, "license_type": "no_license", "max_line_length": 168, "num_lines": 24, "path": "/bplanner/migrations-bak/0004_auto_20180723_1757.py", "repo_name": "daviesray-ornyx/bplanner", "src_encoding": "UTF-8", "text": "# Generated by Django 2.0.7 on 2018-07-23 16:57\n\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('bplanner', '0003_auto_20180723_1751'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='helpsubsectionexample',\n name='help_sub_section',\n field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='bplanner.HelpSubSection', verbose_name='Help Sub-Sections'),\n ),\n migrations.AlterField(\n model_name='helpsubsection',\n name='title',\n field=models.CharField(blank=True, default='', max_length=250, null=True, verbose_name='Help Sub-Section Title'),\n ),\n ]\n" }, { "alpha_fraction": 0.5374149680137634, "alphanum_fraction": 0.6077097654342651, "avg_line_length": 23.5, "blob_id": "3a29af2aef1b3a43ef04df44ccac36807f2daff1", "content_id": "6e92084375015d41a75a12b89ce0bfe5e5fe41a6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 441, "license_type": "no_license", "max_line_length": 91, "num_lines": 18, "path": "/bplanner/migrations/0012_auto_20181004_1546.py", "repo_name": "daviesray-ornyx/bplanner", "src_encoding": "UTF-8", "text": "# Generated by Django 2.0.7 on 2018-10-04 12:46\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('bplanner', '0011_auto_20181002_1818'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='businessplantitlepage',\n name='logo',\n field=models.TextField(blank=True, default='', null=True, verbose_name='Logo'),\n ),\n ]\n" } ]
52
Shrutika148/Blogproject1
https://github.com/Shrutika148/Blogproject1
30967a6e23412402d947861e5e830aea2386c1da
81dec234b9ab851b1165c06a374f2e1b2b8744f8
4afcb48335b918d6ef42b922683072a65306d9be
refs/heads/main
2023-07-13T00:30:26.371811
2021-08-28T19:34:04
2021-08-28T19:34:04
400,875,739
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7401459813117981, "alphanum_fraction": 0.7430657148361206, "avg_line_length": 33.25, "blob_id": "22b5673bd0de6d605efcf123b961bc3ac557be34", "content_id": "695eec0aabcfb6ac94ef8b894cf66f8086b19402", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 685, "license_type": "no_license", "max_line_length": 101, "num_lines": 20, "path": "/BlogProject1/blog/templatetags/blog_tags.py", "repo_name": "Shrutika148/Blogproject1", "src_encoding": "UTF-8", "text": "from blog.models import Post\nfrom django import template\nfrom django.db.models import Count\n\nregister=template.Library()\[email protected]_tag(name='my_tags')\ndef total_posts(): #is a name of custom template tag\n post_list=Post.objects.filter(status__exact='published')\n return post_list.count()\n\n\[email protected]_tag('blog/latest_post.html')\ndef show_latest_post(count=3):\n post=Post.objects.filter(status__exact='published')\n latest_post=post.order_by('-publish')[:count]\n return{'latest_post':latest_post}\n\[email protected]_tag\ndef get_MostCommentedPost(cnt=3):\n return Post.objects.annotate(total_comments=Count('comments')).order_by('-total_comments') [:cnt]\n" } ]
1
ucb-smartcities/Tutorials-General-Info
https://github.com/ucb-smartcities/Tutorials-General-Info
81f6e3b01eea0f19767b585194de16355aae8252
661c22014a4e4e85bc54a9cb1282b9e0b7e90d4d
d081d3793ad9935ce1f7724fd1ab2058b9bb2f25
refs/heads/master
2017-05-09T18:35:19.481908
2017-02-20T22:24:14
2017-02-20T22:24:14
36,403,280
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6315200924873352, "alphanum_fraction": 0.6568554639816284, "avg_line_length": 45.08771896362305, "blob_id": "21e2c8a4473ca4278a39c12fa0ba311b0aca7533", "content_id": "ec34270822482d4bd342e43ac08a5ff8ec1299bb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2684, "license_type": "no_license", "max_line_length": 125, "num_lines": 57, "path": "/Network-Analysis-Tutorials/departure_time/Batch_Gradient_Descent.py", "repo_name": "ucb-smartcities/Tutorials-General-Info", "src_encoding": "UTF-8", "text": "## Code to solve departure time estimation optimization problem through batch-gradient descent\r\n# Objective function = Sum_r_d{qrd*log(qrd)-qrd} + Sum_k_t{vkt-Sum_r_d(qrd*prktd)} - log(qrd-Nrd)\r\n# Gradient function = qrd - Sum_r_d(pkrtd) - 1/(qrd-Nrd)\r\n\r\nimport numpy as np\r\nimport csv\r\nimport sys\r\n\r\n#Defining variables\r\nNum_zones = sys.argv[0] #Number of departure zones\r\nNum_links = sys.argv[1] #Number of links where counts are observed\r\nTime_bins = sys.argv[2] #Maximum number of time bins for a which a journey may last\r\nqrd_old = np.zeros(Num_zones,Time_bins) # Value of objective function before gradient step\r\nqrd_new = np.zeros(Num_zones,Time_bins) # Value of objective function after gradient step\r\n#gamma = 0.01 # step size (if constant)\r\nprecision =0.01 #Convergence criteria parameter\r\n\r\n#Importing data\r\n\r\n#1. Prktd: Probability matrix for an agent departing zone 'r' in time bin 'd' to be observed at link 'k' in time bin 't' \r\n# Matrix size = Num_zones X Time_bins X Num_Links X Time_bins\r\nPrktd=np.zeros(Num_zones,Time_bins,Num_links,Time_bins)\r\nC = np.zeros(Num_zones*Time_bins,Num_links,Time_bins)\r\nfor i in range(1,Num_zones):\r\n C[i]=np.genfromtxt(sys.argv[3], delimiter=',', usecols=(0,1,2,3,4,5,6,7,8,9,10,11))\r\nC=np.reshape(C, (Num_zones,Time_bins,Num_links,Time_bins))\r\nPrktd=C[:,0:12,:,:]\r\n\r\n#2. Vtk: Link count at link 'k' during time bin 't'\r\n# Matrix Size = Num_Links X Number of 5-minute bins in a day (288)\r\nVtk=np.genfromtxt(sys.argv[4], delimiter=',')\r\n\r\n#3. 
Nrd: Observed departures from zone 'r' during time bin 'd' using cell-phone data\r\n# Matrix Size = Num_zones X Number of 5-minute bins in a day (288)\r\nNrd=np.genfromtxt(sys.argv[6], delimiter=',')\r\n\r\nfor t in range(1,277): #there are 288 5-minute bins in a day, but we can only solve for 277 5-minute bins with the data\r\n qrd_new = Nrd[:,t-1:t+Time_bins-1]+ np.ones((Num_zones,Time_bins))# Initial value chosen as (Nrd+1) to ensure feasibility\r\n def f_derivative(x): #Derivative of the objective; the probability term is summed over links k and observation bins t (axes 2 and 3)\r\n return np.log(x)-np.sum(Prktd,axis=(2,3))-1./(x-Nrd[:,t-1:t+Time_bins-1])\r\n count=1\r\n gamma = 1 # default step size (also used in the output filename if the loop exits immediately)\r\n while (np.linalg.norm(f_derivative(qrd_new)) >= precision):\r\n # Defining step size (if variable)\r\n if (count ==1):\r\n gamma = 1\r\n else:\r\n gamma = 1./np.log(count)\r\n #Gradient step\r\n qrd_old = qrd_new\r\n qrd_new = qrd_old - gamma*f_derivative(qrd_old)\r\n count+=1\r\n \r\n # Saving to .csv file \r\n with open(sys.argv[7]+str(t)+'_gamma_'+str(gamma)+'.csv','wb') as f:\r\n writer = csv.writer(f)\r\n for i in range(len(qrd_new)):\r\n writer.writerow(qrd_new[i])\r\n" }, { "alpha_fraction": 0.6967699527740479, "alphanum_fraction": 0.7369808554649353, "avg_line_length": 67.95454406738281, "blob_id": "32ca270bb358306ac1363de7207f91fa1a3663093", "content_id": "130c7c2ebb51526d7495699aa14853974e0c9ea2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1517, "license_type": "no_license", "max_line_length": 216, "num_lines": 22, "path": "/Network-Analysis-Tutorials/departure_time/README.md", "repo_name": "ucb-smartcities/Tutorials-General-Info", "src_encoding": "UTF-8", "text": "# How to run code for departure time estimation \n\n## Inputs required: \n1. Number of zones in network (r)\n2. Number of links in network (k)\n3. Maximum number of 5-minute time bins for which a trip may last (t_max)\n4. Filepath for link count matrix (vkt) - .csv file containing a k X 288 matrix\n5. Filepath for partially observed departure matrix (Nrd) - .csv file containing a r X 288 matrix\n6. Base filepath for link route probability matrix (Prkdt) - r X 288 .csv files, each containing a k X 288 matrix with filename format `<base_filename><zone_number X 288 + time_bin_number>.csv`\n7. Base filepath for output files - 288 .csv files, each containing a r X 288 matrix with filename format `<base_filename><time_bin_number>.csv`\n\n## 1. Using Python for Batch Gradient Descent:\n`$ python Batch_Gradient_Descent.py arg1 arg2 arg3 arg6 arg4 arg5 arg7` \n \nCommand for running code with sample input data: \n`$ python Batch_Gradient_Descent.py 54 54 36 './sample_input/probability_matrix/Constraints2' './sample_input/link_5_min_counts.csv' './sample_input/cell_phone_constraints.csv' './sample_output/dept_time_'` \n \n## 2. Using MATLAB interface for IP-OPT: \n`$ matlab -r Departure_Time_Estimation(arg1, arg2, arg3, arg4, arg5, arg6, arg7)` \n \nCommand for running code with sample input data: \n`$ matlab -r Departure_Time_Estimation(54, 54, 12, './sample_input/link_5_min_counts.csv', './sample_input/cell_phone_constraints.csv', './sample_input/probability_matrix/Constraints2', './sample_output/dept_time_')`\n" } ]
2
villegar/vaca
https://github.com/villegar/vaca
0d34f6c9ffeef1b732d5969d47c9a132e9d13c32
4635196f0889818ed280d52cfdf317e3b2af0788
577c3ccd2743a9f3ea7c48af53b71a524a8b317d
refs/heads/master
2023-07-09T00:32:48.879247
2021-08-10T18:06:05
2021-08-10T18:06:05
276,186,657
0
0
MIT
2020-06-30T19:10:10
2020-11-01T12:16:24
2021-08-10T18:06:05
Shell
[ { "alpha_fraction": 0.699999988079071, "alphanum_fraction": 0.7300000190734863, "avg_line_length": 99, "blob_id": "f07f98a20e8e2b038e5a185be88deec0e9bbbb2b", "content_id": "93384a6e89562f6478c1fc31a3cc221681874cd2", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 100, "license_type": "permissive", "max_line_length": 99, "num_lines": 1, "path": "/README.md", "repo_name": "villegar/vaca", "src_encoding": "UTF-8", "text": "# VaCa: Variant Calling pipeline <img src=\"images/logo.png\" alt=\"logo\" align=\"right\" height=200px/>\n" }, { "alpha_fraction": 0.594698429107666, "alphanum_fraction": 0.6039185523986816, "avg_line_length": 29.988094329833984, "blob_id": "8711b9aae272e07982f198530f5ad5dfaa20868e", "content_id": "a5779a227448da25a637ba3bbec622491bb50bf3", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2603, "license_type": "permissive", "max_line_length": 90, "num_lines": 84, "path": "/Snakefile", "repo_name": "villegar/vaca", "src_encoding": "UTF-8", "text": "####### Libraries #######\nfrom utils import extractFilenames, findLibraries, loadGenome, verifyGenome, which\nfrom utils import expand_list as el\n\n####### Global variables #######\nEXTENSION = config[\"reads\"][\"extension\"]\nPREFIX = config[\"reads\"][\"prefix\"]\nREADS = config[\"reads\"][\"path\"]\nPAIRED_END = [True if config[\"reads\"][\"end_type\"] == \"pe\" else False][0]\ntry:\n BBDUK_OPTIONS = config[\"bbduk\"][\"options\"]\nexcept:\n raise ValueError(\"bbduk > options not found in the configuration file\")\nif PAIRED_END:\n FORWARD_READ_ID = [config[\"reads\"][\"forward_read_id\"]]\n REVERSE_READ_ID = [config[\"reads\"][\"reverse_read_id\"]]\n ENDS = [FORWARD_READ_ID,REVERSE_READ_ID]\n SUFFIX = \"_\" + FORWARD_READ_ID[0] + \".\" + EXTENSION\nelse:\n ENDS = []\n FORWARD_READ_ID = []\n REVERSE_READ_ID = []\n SUFFIX = \".\" + EXTENSION\n\nLIBS = findLibraries(READS,PREFIX,SUFFIX)\n\n###### Multithread configuration #####\nCPUS_FASTQC = 4\nCPUS_TRIMMING = 5\nCPUS_HISAT2_INDEX = 40\nCPUS_ALIGNMENT = 10\nCPUS_READCOUNTS = 20\n\nADAPTER = which(\"bbduk\")\n\n####### Output directories #######\nREF_GENOME = \"GENOME/\"\nRAW_FASTQC = \"1.QC.RAW/\"\nTRIMMED_READS = \"2.TRIMMED/\"\nTRIMMED_READS_FASTQC = \"3.QC.TRIMMED/\"\nALIGNMENT = \"4.ALIGNMENT/\"\nALIGNMENT_QC = \"5.QC.ALIGNMENT/\"\nCOUNTS = \"6.COUNTS/\"\nRMD = \"7.RMD/\"\nREPORTS = \"999.REPORTS/\"\n\n####### Reference datasets #######\nFA,GTF = loadGenome(config[\"genome\"])\nGENOME_FILENAMES = {\"FA\":FA,\"GTF\":GTF}\nverifyGenome(config[\"genome\"],REF_GENOME + FA, REF_GENOME + GTF)\n\nRAW_ENDS = [\"\"]\nif PAIRED_END:\n RAW_ENDS = el([\"_\"],ENDS)\n\n####### Rules #######\nrule all:\n input:\n expand(RAW_FASTQC + \"{raw_reads}{raw_ends}_fastqc.{format}\",\n raw_reads = LIBS, raw_ends = RAW_ENDS, format = [\"html\",\"zip\"])\n # expand(RAW_FASTQC + \"{raw_reads}{raw_ends}_fastqc.{format}\",\n # raw_reads = LIBS, raw_ends = [1, 2], format = [\"html\",\"zip\"])\n output:\n expand(REPORTS + \"Report_{step}.html\", step = [\"FastQC_Raw\"])\n params:\n logs \t= directory(LOGS),\n reports\t= directory(REPORTS)\n run:\n shell(\"multiqc -f -o {params.reports} -n Report_FastQC_Raw.html -d \" + RAW_FASTQC)\n\nrule fastqc_raw:\n input:\n reads = READS_PATH + \"{raw_reads}{raw_ends}.\" + EXTENSION\n output:\n html = RAW_FASTQC + \"{raw_reads}{raw_ends}_fastqc.html\",\n zip = RAW_FASTQC + \"{raw_reads}{raw_ends}_fastqc.zip\"\n message:\n \"FastQC on 
raw data\"\n log:\n RAW_FASTQC + \"{raw_reads}{raw_ends}.log\"\n threads:\n CPUS_FASTQC\n shell:\n \"fastqc -o \" + RAW_FASTQC + \" -t {threads} {input.reads} 2> {log}\"\n" }, { "alpha_fraction": 0.6331360936164856, "alphanum_fraction": 0.7146351337432861, "avg_line_length": 52.878787994384766, "blob_id": "c0db5099412321030320706b5fd47ef5bd743cb4", "content_id": "04ab276e1488fed97b68202cad5e8e0e52d3c304", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 12679, "license_type": "permissive", "max_line_length": 419, "num_lines": 231, "path": "/edited_gatk.sh", "repo_name": "villegar/vaca", "src_encoding": "UTF-8", "text": "srun -N 1 --cpus-per-task=10 --time=5:00:00 --partition=compute --pty bash\r\n\r\nmkdir GATK_tutorial\r\ncd GATK_tutorial\r\n\r\n# download data\r\nwget https://de.cyverse.org/dl/d/3CE425D7-ECDE-46B8-AB7F-FAF07048AD42/samples.tar.gz\r\n tar xvzf samples.tar.gz\r\n rm samples.tar.gz\r\n\r\n\r\nmkdir -p dbSNP\r\nmkdir -p SAM\r\nmkdir -p BAM\r\nmkdir -p sortedBAM\r\nmkdir -p SNPs\r\nmkdir -p SNPs/{MERGED,persample}\r\n\r\nGATK=\"${PWD}/\"\r\ndbSNP=\"${GATK}/dbSNP/\"\r\nSAM=\"${GATK}/SAM/\"\r\nBAM=\"${GATK}/BAM/\"\r\nsortedBAM=\"${GATK}/sortedBAM/\"\r\nSNPs=\"${GATK}/SNPs/\"\r\nMERGED=\"${SNPs}MERGED/\"\r\noutdir=\"${SNPs}persample/\"\r\n\r\n## module use /gpfs/shared/modulefiles_local/bio to run these modules\r\nmodule load samtools\r\nmodule load bwa\r\nmodule load R\r\n\r\n# Rscript -e \"install.packages('ggplot2', contriburl=contrib.url('http://cran.r-project.org/'))\"\r\n#\r\n# Rscript -e \"install.packages('gplots', contriburl=contrib.url('http://cran.r-project.org/'))\"\r\n#\r\n# Rscript -e \"install.packages('reshape', contriburl=contrib.url('http://cran.r-project.org/'))\"\r\n#\r\n# Rscript -e \"install.packages('gsalib', contriburl=contrib.url('http://cran.r-project.org/'))\"\r\n#\r\n# Rscript -e \"install.packages('Biobase', contriburl=contrib.url('http://bioconductor.org/packages/release/bioc/'))\"\r\n#\r\n# install.packages('ggplot2', contriburl=contrib.url('http://cran.r-project.org/'))\r\n# install.packages('gplots', contriburl=contrib.url('http://cran.r-project.org/'))\r\n# install.packages('reshape', contriburl=contrib.url('http://cran.r-project.org/'))\r\n# install.packages('gsalib', contriburl=contrib.url('http://cran.r-project.org/'))\r\n# install.packages('Biobase', contriburl=contrib.url('http://bioconductor.org/packages/release/bioc/'))\r\n\r\n# mkdir -p $HOME/.rlibs\r\n# export R_LIBS_USER=$HOME/.rlibs\r\n\r\n# download reference genome\r\nwget ftp://ftp.ensemblgenomes.org/pub/fungi/release-47/fasta/saccharomyces_cerevisiae/dna/Saccharomyces_cerevisiae.R64-1-1.dna.toplevel.fa.gz
\r\ngunzip Saccharomyces_cerevisiae.R64-1-1.dna.toplevel.fa.gz\r\nbwa index Saccharomyces_cerevisiae.R64-1-1.dna.toplevel.fa\r\n\r\n### example single-sample alignment against the yeast genome (skip for now)\r\nbwa mem -M -R '@RG\\tID:ABC123.LANE3\\tLB:LIB-12JaimeYeast\\tPL:ILLUMINA\\tSM:12' ../genome/Saccharomyces_cerevisiae.R64-1-1.dna.toplevel.fa.gz
 12JaimeYeast-05272020-12_S5_L001_R1_001.fastq.gz 12JaimeYeast-05272020-12_S5_L001_R2_001.fastq.gz > 12JaimeYeast_aligned.sam\r\n\r\n\r\nmodule load fastqc\r\nmkdir fastqc\r\n\r\n### Trim adaptor : this is to remove the adaptor added by the library ####### cut adapt need to know the adaptors used\r\nmodule load bio/cutadapt/2.0\r\ncutadapt -a CTGTCTCTTATACACATCT -A CTGTCTCTTATACACATCT -o 1JaimeYeast-05272020-1-sq_S4_L001_R1_001_trimmed.fastq -p 1JaimeYeast-05272020-1-sq_S4_L001_R2_001_trimmed.fastq 1JaimeYeast-05272020-1-sq_S4_L001_R1_001.fastq 1JaimeYeast-05272020-1-sq_S4_L001_R2_001.fastq\r\ncutadapt -a CTGTCTCTTATACACATCT -A CTGTCTCTTATACACATCT -o 1JaimeYeast-05272020-1-sq_S4_L002_R1_001_trimmed.fastq -p 1JaimeYeast-05272020-1-sq_S4_L002_R2_001_trimmed.fastq 1JaimeYeast-05272020-1-sq_S4_L002_R1_001.fastq 1JaimeYeast-05272020-1-sq_S4_L002_R2_001.fastq\r\ncutadapt -a CTGTCTCTTATACACATCT -A CTGTCTCTTATACACATCT -o 1JaimeYeast-05272020-1-sq_S4_L003_R1_001_trimmed.fastq -p 1JaimeYeast-05272020-1-sq_S4_L003_R2_001_trimmed.fastq 1JaimeYeast-05272020-1-sq_S4_L003_R1_001.fastq 1JaimeYeast-05272020-1-sq_S4_L003_R2_001.fastq\r\ncutadapt -a CTGTCTCTTATACACATCT -A CTGTCTCTTATACACATCT -o 1JaimeYeast-05272020-1-sq_S4_L004_R1_001_trimmed.fastq -p 1JaimeYeast-05272020-1-sq_S4_L004_R2_001_trimmed.fastq 1JaimeYeast-05272020-1-sq_S4_L004_R1_001.fastq 1JaimeYeast-05272020-1-sq_S4_L004_R2_001.fastq\r\n\r\n## bbduk to automatically trim adaptors\r\nbbduk in1=1JaimeYeast-05272020-1-sq_S4_L001_R1_001.fastq.gz in2=1JaimeYeast-05272020-1-sq_S4_L001_R2_001.fastq.gz out1=1JaimeYeast-05272020-1-sq_S4_L001_R1_001_trimmed.fastq.gz out2=1JaimeYeast-05272020-1-sq_S4_L001_R2_001_trimmed.fastq.gz ref=/gpfs/shared/apps_local/bbtools/38.79/resources/nextera.fa.gz k=23 mink=11 hdist=1 tbo tpe ktrim=l\r\n#########RUN THIS: ADD read Group!!!!#############\r\n\r\nfor R1 in *_R1_001_trimmed.fastq;do\r\n SM=$(echo $R1 | cut -d\"_\" -f1) ##sample ID\r\n LB=$(echo $R1 | cut -d\"_\" -f1,2) ##library ID\r\n PL=\"Illumina\" ##platform (e.g. 
illumina, solid)\r\n RGID=$(cat $R1 | head -n1 | sed 's/:/_/g' |cut -d \"_\" -f1,2,3,4) ##read group identifier\r\n PU=$RGID.$LB ##Platform Unit\r\n echo -e \"SM:$SM\\tPL:$PL\\tLB:$LB\\tPU:$PU\"\r\n\r\n R2=$(echo $R1 | sed 's/_R1_/_R2_/')\r\n echo $R1 $R2\r\n bwa mem -t 40 -M -R '@RG\\tID:'${RGID}'\\tSM:'${SM}'\\tPL:'${PL}'\\tLB:'${LB}'\\tPU:'${PU} Saccharomyces_cerevisiae.R64-1-1.dna.toplevel.fa $R1 $R2 > ${SAM}${R1%_R1_001.fastq}.sam\r\ndone\r\n\r\n\r\n\r\n# Generate BAM file\r\nfor samfile in ${SAM}*.sam;do\r\n sample=$(basename \"${samfile%.*}\")\r\n echo \"Doing: \" $sample\r\n samtools view -bS -o ${BAM}${sample}.bam $samfile\r\n echo \"Created: \" ${BAM}${sample}.bam\r\n samtools sort ${BAM}${sample}.bam -o ${sortedBAM}${sample}.sorted.bam\r\n echo \"Created: \" ${sortedBAM}${sample}.sorted.bam\r\ndone\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\nwget https://github.com/broadinstitute/picard/releases/download/2.9.4/picard.jar\r\nchmod u+x picard.jar\r\n\r\n\r\n\r\n\r\nwget https://de.cyverse.org/dl/d/6177B1E0-718A-4F95-A83B-C3B88E23C093/GenomeAnalysisTK-3.7-0.tar.bz2\r\ntar xjf GenomeAnalysisTK-3.7-0.tar.bz2\r\n\r\n\r\n\r\njava -Xmx10g -jar ${GATK}picard.jar CreateSequenceDictionary R=Saccharomyces_cerevisiae.R64-1-1.dna.toplevel.fa O=Saccharomyces_cerevisiae.R64-1-1.dna.toplevel.dict\r\n samtools faidx Saccharomyces_cerevisiae.R64-1-1.dna.toplevel.fa\r\n\r\n\r\n# Merge BAM replicates\r\n#samtools view -H ${sortedBAM}1JaimeYeast-05272020-1-sq_S4_L001_R1_001_trimmed.fastq.sorted.bam | grep -v \"^@RG\" | samtools reheader - ${sortedBAM}1JaimeYeast-05272020-1-sq_S4_L001_R1_001_trimmed.fastq.sorted.bam > ${sortedBAM}1JaimeYeast-05272020-1-sq_S4_L001_R1_001_trimmed.fastq.sorted.bam\r\n#samtools view -H ${sortedBAM}1JaimeYeast-05272020-1-sq_S4_L002_R1_001_trimmed.fastq.sorted.bam | grep -v \"^@RG\" | samtools reheader - ${sortedBAM}1JaimeYeast-05272020-1-sq_S4_L002_R1_001_trimmed.fastq.sorted.bam > ${sortedBAM}1JaimeYeast-05272020-1-sq_S4_L002_R1_001_trimmed.fastq.sorted.bam\r\n#samtools view -H ${sortedBAM}1JaimeYeast-05272020-1-sq_S4_L003_R1_001_trimmed.fastq.sorted.bam | grep -v \"^@RG\" | samtools reheader - ${sortedBAM}1JaimeYeast-05272020-1-sq_S4_L003_R1_001_trimmed.fastq.sorted.bam > ${sortedBAM}1JaimeYeast-05272020-1-sq_S4_L003_R1_001_trimmed.fastq.sorted.bam\r\n#samtools view -H ${sortedBAM}1JaimeYeast-05272020-1-sq_S4_L004_R1_001_trimmed.fastq.sorted.bam | grep -v \"^@RG\" | samtools reheader - ${sortedBAM}1JaimeYeast-05272020-1-sq_S4_L004_R1_001_trimmed.fastq.sorted.bam > ${sortedBAM}1JaimeYeast-05272020-1-sq_S4_L004_R1_001_trimmed.fastq.sorted.bam\r\n\r\njava -jar ${GATK}picard.jar MergeSamFiles I=\"${sortedBAM}1JaimeYeast-05272020-1-sq_S4_L001_R1_001_trimmed.fastq.sorted.bam\" I=\"${sortedBAM}1JaimeYeast-05272020-1-sq_S4_L002_R1_001_trimmed.fastq.sorted.bam\" I=\"${sortedBAM}1JaimeYeast-05272020-1-sq_S4_L003_R1_001_trimmed.fastq.sorted.bam\" I=\"${sortedBAM}1JaimeYeast-05272020-1-sq_S4_L004_R1_001_trimmed.fastq.sorted.bam\" OUTPUT=\"${sortedBAM}1_trimmed_merged.sorted.bam\"\r\n\r\n\r\n\r\n\r\n#Mark duplicates\r\n\r\n\r\nfor sample in ${sortedBAM}*.sorted.bam;do\r\n #name=${sample%.sorted.bam}\r\n #name=$(basename \"${sample%.*}\")\r\n name=$(basename \"${sample%.sorted.bam}\")\r\n echo \"Doing: \" $name\r\n java -Xmx10g -jar ${GATK}picard.jar MarkDuplicates INPUT=$sample OUTPUT=${sortedBAM}${name}.dedup.bam METRICS_FILE=$name.metrics.txt;\r\ndone\r\n\r\n# cd sortedBAM\r\n#samtools view -H ${sortedBAM}1JaimeYeast-05272020-1-sq_S4_L001_R1_001_trimmed.fastq.sorted.bam\r\n#samtools view -H 
${sortedBAM}BD143_TGACCA_L006.sorted.bam\r\n#samtools view -H ${sortedBAM}BD143_TGACCA_merged.sorted.bam\r\n#samtools view -H ${sortedBAM}BD143_TGACCA_merged.dedup.bam\r\n\r\nmv ${sortedBAM}1JaimeYeast-05272020-1-sq_S4_L00*_R1_001_trimmed.fastq.sorted* .\r\n\r\n\r\n\r\n# reference annotation page: http://m.ensembl.org/Saccharomyces_cerevisiae/Info/Annotation#assembly\r\n\r\n# wget 'ftp://ftp.ensemblgenomes.org/pub/fungi/release-47/variation/vcf/saccharomyces_cerevisiae/saccharomyces_cerevisiae.vcf.gz'\r\nwget 'ftp://ftp.ensemblgenomes.org/pub/fungi/release-47/variation/vcf/saccharomyces_cerevisiae/saccharomyces_cerevisiae.vcf.gz' -O ${dbSNP}saccharomyces_cerevisiae.vcf.gz\r\n\r\n# mv Saccharomyces_cerevisiae.vcf.gz Saccharomyces_cerevisiae.vcf.gz\r\n\r\ngunzip -c ${dbSNP}saccharomyces_cerevisiae.vcf.gz > ${dbSNP}saccharomyces_cerevisiae.vcf\r\n## to extract chromosome 15 for Top1\r\ngrep \"^#\" ${dbSNP}saccharomyces_cerevisiae.vcf > ${dbSNP}saccharomyces_cerevisiae_chr15.vcf\r\ngrep \"^15\" ${dbSNP}saccharomyces_cerevisiae.vcf | sed 's/^15/chr15/' >> ${dbSNP}saccharomyces_cerevisiae_chr15.vcf\r\n# Run Recalibration\r\n# BQSR stands for Base Quality Score Recalibration.\r\nsamtools view -H ${sortedBAM}1_trimmed_merged.sorted.bam | grep -v \"^@RG\" | samtools reheader - ${sortedBAM}1_trimmed_merged.sorted.bam > ${sortedBAM}1_trimmed_merged.reheader.bam # write to a new file: redirecting onto the input file would truncate it\r\nmv ${sortedBAM}1_trimmed_merged.reheader.bam ${sortedBAM}1_trimmed_merged.sorted.bam\r\n\r\n\r\nfor sample in ${sortedBAM}*.dedup.bam;do\r\n #name=${sample%.dedup.bam}\r\n name=$(basename \"${sample%.dedup.bam}\")\r\n echo \"Doing: \" $name\r\n samtools index $sample\r\n java -Xmx10g -jar GenomeAnalysisTK.jar -T BaseRecalibrator -R ${GATK}Saccharomyces_cerevisiae.R64-1-1.dna.toplevel.fa -I $sample -knownSites ${dbSNP}saccharomyces_cerevisiae.vcf -o ${SNPs}${name}.1st.table\r\n java -Xmx10g -jar GenomeAnalysisTK.jar -T BaseRecalibrator -R ${GATK}Saccharomyces_cerevisiae.R64-1-1.dna.toplevel.fa -I $sample -knownSites ${dbSNP}saccharomyces_cerevisiae.vcf -BQSR ${SNPs}${name}.1st.table -o ${SNPs}${name}.2nd.table\r\n java -Xmx10g -jar GenomeAnalysisTK.jar -T PrintReads -R ${GATK}Saccharomyces_cerevisiae.R64-1-1.dna.toplevel.fa -I $sample -BQSR ${SNPs}${name}.2nd.table -o ${SNPs}${name}.recal.bam\r\n java -Xmx10g -jar GenomeAnalysisTK.jar -T AnalyzeCovariates -R ${GATK}Saccharomyces_cerevisiae.R64-1-1.dna.toplevel.fa -before ${SNPs}${name}.1st.table -after ${SNPs}${name}.2nd.table -plots ${SNPs}${name}.BQSR.pdf\r\ndone\r\n\r\n\r\n\r\n# HERE\r\nfor sample in ${SNPs}*.recal.bam;do\r\n name=$(basename \"${sample%.recal.bam}\")\r\n java -Xmx10g -jar GenomeAnalysisTK.jar -T HaplotypeCaller -R ${GATK}Saccharomyces_cerevisiae.R64-1-1.dna.toplevel.fa --dbsnp ${dbSNP}saccharomyces_cerevisiae.vcf -I $sample --emitRefConfidence GVCF -nct 3 -o ${outdir}${name}.g.vcf\r\ndone\r\n\r\n\r\n#Combine call\r\njava -Xmx10g -jar GenomeAnalysisTK.jar -T GenotypeGVCFs -R ${GATK}Saccharomyces_cerevisiae.R64-1-1.dna.toplevel.fa --dbsnp ${dbSNP}saccharomyces_cerevisiae.vcf --variant ${outdir}1JaimeYeast-05272020-1-sq_S4_L001_R1_001_trimmed.fastq.g.vcf --variant ${outdir}1JaimeYeast-05272020-1-sq_S4_L002_R1_001_trimmed.fastq.g.vcf -o ${MERGED}raw_variants.vcf\r\n\r\n\r\n # split variants into SNPs and indels\r\nmkdir ${MERGED}/SNPs\r\nmkdir ${MERGED}/INDELs\r\njava -Xmx10g -jar ${GATK}GenomeAnalysisTK.jar -T SelectVariants -R ${GATK}Saccharomyces_cerevisiae.R64-1-1.dna.toplevel.fa -V ${MERGED}raw_variants.vcf -selectType SNP -o \"${MERGED}/SNPs/\"raw_SNP.vcf\r\njava -Xmx10g -jar ${GATK}GenomeAnalysisTK.jar -T SelectVariants -R ${GATK}Saccharomyces_cerevisiae.R64-1-1.dna.toplevel.fa -V 
${MERGED}raw_variants.vcf -selectType INDEL -o \"${MERGED}/INDELs/\"raw_INDEL.vcf\r\n\r\n\r\n# Distribution of variants\r\ncd $MERGED\r\nmkdir both\r\ncp INDELs/* ./both/\r\ncp SNPs/* ./both/\r\ncd \"$MERGED/both\"\r\nwget https://raw.githubusercontent.com/drtamermansour/angus/2017/densityCurves.R\r\nfor var in \"SNP\" \"INDEL\";do\r\n for ann in \"QD\" \"MQRankSum\" \"FS\" \"SOR\" \"ReadPosRankSum\";do\r\n annFile=$var.$ann; echo $annFile;\r\n awk -v k=\"$ann=\" '!/#/{n=split($8,a,\";\"); for(i=1;i<=n;i++) if(a[i]~\"^\"k) {sub(k,$3\" \",a[i]); print a[i]}}' raw_$var.vcf > $annFile\r\n grep -v \"^\\.\" $annFile > known.$annFile\r\n grep \"^\\.\" $annFile > novel.$annFile\r\n Rscript densityCurves.R \"$annFile\"\r\n rm $annFile known.$annFile novel.$annFile\r\ndone; done\r\n\r\n# Apply filters\r\ncd ../../.. # Move back to genome\r\njava -Xmx10g -jar GenomeAnalysisTK.jar -T VariantFiltration -R Saccharomyces_cerevisiae.R64-1-1.dna.toplevel.fa -V $MERGED/both/raw_SNP.vcf --filterExpression \"QD < 2.0 || FS > 60.0 || MQ < 40.0\" --filterName \"snp_filter\" -o $MERGED/both/filtered_SNP.vcf\r\n\r\njava -Xmx10g -jar GenomeAnalysisTK.jar -T VariantFiltration -R Saccharomyces_cerevisiae.R64-1-1.dna.toplevel.fa -V $MERGED/both/raw_INDEL.vcf --filterExpression \"QD < 2.0 || FS > 200.0\" --filterName \"indel_filter\" -o $MERGED/both/filtered_INDEL.vcf\r\n\r\n\r\n#NEXT>>>>>> R Programming\r\nGWAS\r\nFamily-Based/Gene-based Study\r\nEthnicity Mapping\r\nCancer gene discovery/Clonal evolution\r\nPersonalized medicine\r\n" } ]
3
mkcin/YouTube_Downloader
https://github.com/mkcin/YouTube_Downloader
0c5441ccbe3f10141cd5143d7de72404884e64d7
46fa2771da93c30491e569c695f9f2ac5c4c62fe
db1a8dc0113ec41e11dc270c7846a3dd88feb0fb
refs/heads/master
2021-06-27T19:58:20.584749
2017-09-17T20:32:28
2017-09-17T20:32:28
103,747,117
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.8426966071128845, "alphanum_fraction": 0.8426966071128845, "avg_line_length": 43.5, "blob_id": "05944418135f121a5f1172ae6eaeeb2645b691b8", "content_id": "cf3fec100c8b01a96c8c49a373ff24d5871bb718", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 89, "license_type": "no_license", "max_line_length": 67, "num_lines": 2, "path": "/README.md", "repo_name": "mkcin/YouTube_Downloader", "src_encoding": "UTF-8", "text": "# YouTube_Downloader\nJust an educational project, searching and downloading from YouTube\n" }, { "alpha_fraction": 0.6001304388046265, "alphanum_fraction": 0.6086105704307556, "avg_line_length": 27.38888931274414, "blob_id": "a4d2571d14ebf0ffca17d5b504e5ebb706f60a35", "content_id": "6280beac6f209f2b66692c7b6b80a49978689fde", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1533, "license_type": "no_license", "max_line_length": 129, "num_lines": 54, "path": "/download.py", "repo_name": "mkcin/YouTube_Downloader", "src_encoding": "UTF-8", "text": "from youtube import YoutubeSearch\nfrom query_set import QUERY_SET, prepare_query_set\nfrom youtube_dl_user import download\nimport re\nfrom config import API_KEY\nimport os\n\n'''\ntworzenie zapytania i wypisywanie wynikow wyszukiwania\n'''\n\nyt=YoutubeSearch(API_KEY)\n\nwhat = input('1 - video\\n2 - music\\n')\nif(what == '2'):\n what = 'music'\nelse:\n what = 'video'\n\nprepare_query_set()\n# print (QUERY_SET)\n\nyt.make_request(\n q = QUERY_SET['query'],\n maxResults = QUERY_SET['max_result'],\n type = QUERY_SET['v_type'],\n videoDuration = QUERY_SET['v_duration'],\n order = QUERY_SET['v_order'] )\n\n# print(yt.get_html())\n# print(yt.get_search_results())\nyt.print_search_results_readable()\n\nif(what == 'music'):\n choice = input('choose one or more results (divide with \\\" \\\"):\\n')\n choice = list(choice.strip())\n videoLinks=[]\n # print (choice)\n for ch in choice:\n if(ch != ' '):\n if(not (re.match(r'^([0-9]+)$', str(ch)) and int(ch) >= 1 and int(ch) <= len(yt.get_search_results()['items']))):\n continue\n else:\n videoLink=yt.get_nth_link(int(ch)-1)\n videoLinks.append(videoLink)\n download(videoLinks, 'music')\nelse:\n choice = 0\n choice = input('number of video:\\n')\n if(not (re.match(r'^([0-9]+)$', str(choice)) and int(choice) >= 1 and int(choice) <= len(yt.get_search_results()['items']))):\n choice=1\n # print(yt.get_search_results())\n videoLink=yt.get_nth_link(int(choice)-1)\n download([videoLink], 'video')\n" }, { "alpha_fraction": 0.44978582859039307, "alphanum_fraction": 0.477629691362381, "avg_line_length": 37.55045700073242, "blob_id": "8bfca336e7650bc6e609eea81bc3c97b0d519c5b", "content_id": "5bd23358406e73369efe9c303bc9b628d09cc33d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4202, "license_type": "no_license", "max_line_length": 147, "num_lines": 109, "path": "/youtube_dl_user.py", "repo_name": "mkcin/YouTube_Downloader", "src_encoding": "UTF-8", "text": "import youtube_dl\nimport re\nimport json\nimport subprocess\n\nYT_OPTS = []\n\ndef ask_for_flags():\n czy_mp3 = input('video or music (default video):\\n1 - video\\n2 - music\\n')\n if(czy_mp3 == '2'):\n m_format = input('format (default mp3):\\n1 - mp3\\n2 - m4a\\n3 - flac\\n4 - wav\\n5 - aac\\n')\n formats = {\n '1': 'mp3',\n '2': 'm4a',\n '3': 'flac',\n '4': 'wav',\n '5': 'aac'}\n if(re.match(r'^([0-9]+)$', str(m_format)) and int(m_format) >= 1 and 
int(m_format) <= 5):\n m_format = formats[m_format]\n else:\n # print(\"default\\n\")\n m_format = 'mp3'\n YT_OPTS.append('-x')\n YT_OPTS.append('--audio-format')\n YT_OPTS.append(m_format)\n print(YT_OPTS)\n else:\n YT_OPTS.append('-f')\n quality = input('choose quality (default possible best):\\n1 - 1080p\\n2 - 720p\\n3 - 480p\\n4 - 360p\\n5 - 144p\\n')\n # pick exactly one format string for the chosen quality\n if(quality == '5'):\n YT_OPTS.append('bestvideo[height<=144]+bestaudio/best[height<=144]')\n elif(quality == '4'):\n YT_OPTS.append('bestvideo[height<=360]+bestaudio/best[height<=360]')\n elif(quality == '3'):\n YT_OPTS.append('bestvideo[height<=480]+bestaudio/best[height<=480]')\n elif(quality == '2'):\n YT_OPTS.append('bestvideo[height<=720]+bestaudio/best[height<=720]')\n elif(quality == '1'):\n YT_OPTS.append('bestvideo[height<=1080]+bestaudio/best[height<=1080]')\n else:\n YT_OPTS.append('bestvideo[ext=mp4]+bestaudio[ext=m4a]/best[ext=mp4]/best')\n\n\ndef download(vids, what):\n if(what == 'video'):\n vid = vids[0]\n print(vid)\n ydl = youtube_dl.YoutubeDL({'outtmpl': '%(id)s%(ext)s'})\n with ydl:\n result = ydl.extract_info(\n vid,\n download=False # We just want to extract the info\n )\n video = result\n # print(video)\n # print(json.dumps(video, indent = 2))\n print('---------------------------\\n')\n print(video['title'])\n count = 1\n choices={}\n audio = 1000\n for v_format in video['formats']:\n # print(v_format['format'] + \" \" + v_format['ext'])\n f = v_format['format'].split(' - ')[0]\n first_word = v_format['format'].split()[2]\n if(first_word=='audio'):\n audio = min(audio, int(f))\n continue\n # print(first_word)\n # print(f)\n try:\n print('{}. {} - {} ({}MB)'.format(count, v_format['format'].split(' - ')[1], v_format['ext'], float(v_format['filesize'])/1000000))\n except:\n print('{}. 
{} - {}'.format(count, v_format['format'].split(' - ')[1], v_format['ext']))\n choices[str(count)] = [f, first_word]\n count += 1\n # print(choices)\n choice = input(': ')\n if(not (re.match(r'^([0-9]+)$', str(choice)) and int(choice) >= 1 and int(choice) <= int(count-1))):\n choice = str(count-1)\n opt = ['-cif', str(choices[choice][0]+'+'+str(audio)), '-o', '%(title)s.%(ext)s', vid]\n\n youtube_dl.main(opt)\n else:\n # ydl = youtube_dl.YoutubeDL({'outtmpl': '%(id)s%(ext)s'})\n # with ydl:\n # result = ydl.extract_info(\n # vid,\n # download=False # We just want to extract the info\n # )\n # video = result\n # # print(video)\n # # print(json.dumps(video, indent = 2))\n # print(video['title'])\n # # print('----------------------')\n # for v_format in video['formats']:\n # # print(v_format['format'] + \" \" + v_format['ext'])\n # f = v_format['format'].split(' - ')[0]\n # first_word = v_format['format'].split()[2]\n # if(first_word == 'audio'):\n # opt = ['-f', f, vid]\n # youtube_dl.main(opt)\n opt = ['--extract-audio', '--audio-format', 'mp3', '-o', '%(title)s.%(ext)s']\n for v in vids:\n opt.append(v)\n youtube_dl.main(opt)\n\n# ask_for_flags()\n# download('https://www.youtube.com/watch?v=fKopy74weus', 'video')\n" }, { "alpha_fraction": 0.6216216087341309, "alphanum_fraction": 0.6216216087341309, "avg_line_length": 34, "blob_id": "f7c16f51081f2f7f0493e99d1a5cb7e24bb33ca9", "content_id": "5f418080e65e6092a1ff9fed9ca862c0506cf5a9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 37, "license_type": "no_license", "max_line_length": 34, "num_lines": 1, "path": "/config.py", "repo_name": "mkcin/YouTube_Downloader", "src_encoding": "UTF-8", "text": "\n\nAPI_KEY = '' # put your API key here\n" }, { "alpha_fraction": 0.5279069542884827, "alphanum_fraction": 0.6023255586624146, "avg_line_length": 23.88888931274414, "blob_id": "6c7f09902d9bbb8d43e3f7df1897352dd581cd6c", "content_id": "c0f58bcfcd6582207252ed7aa6f3e750f3cd7dbf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 430, "license_type": "no_license", "max_line_length": 91, "num_lines": 18, "path": "/query_set.py", "repo_name": "mkcin/YouTube_Downloader", "src_encoding": "UTF-8", "text": "'''\nload the query criteria\n'''\nimport re\ntry:\n from urllib.request import quote\nexcept:\n from urllib2 import quote\n\nQUERY_SET = {\n 'query': '',\n 'max_result': '',\n 'v_type': '',\n 'v_duration': '',\n 'v_order': ''\n}\n\ndef prepare_query_set():\n query = input(\"what to search (nothing given would search most popular videos I guess):\\n\")\n if(query == ''):\n query = None\n QUERY_SET['query'] = query\n # print(\"default\\n\")\n else:\n QUERY_SET['query'] = quote(query)\n\n max_result = input(\"how many results (default 5):\\n\")\n if(re.match(r'^([0-9]+)$', str(max_result))):\n max_result = max_result\n else:\n max_result = None\n # print(\"default\\n\")\n QUERY_SET['max_result'] = max_result\n\n video_type = 'video'\n QUERY_SET['v_type'] = video_type\n\n v_duration = input(\"video duration (default any):\\n1 - any\\n2 - long\\n3 - medium\\n4 - short\\n\")\n duration = {\n '1': 'any',\n '2': 'long',\n '3': 'medium',\n '4': 'short' }\n if(re.match(r'^([0-9]+)$', str(v_duration)) and int(v_duration) >= 1 and int(v_duration) <= 4):\n v_duration = duration[v_duration]\n else:\n # print(\"default\\n\")\n v_duration = None\n QUERY_SET['v_duration'] = v_duration\n\n v_order = input(\"sort by (default 
relevance\\n2 - upload date\\n3 - viewcount\\n4 - rating\\n\")\n order = {\n \"1\": \"relevance\",\n \"2\" : \"date\",\n \"3\" : \"viewcount\",\n \"4\" : \"rating\" }\n if(re.match(r'^([0-9]+)$', str(v_order)) and int(v_order) >= 1 and int(v_order) <= 4):\n v_order = order[v_order]\n else:\n # print(\"default\\n\")\n v_order = None\n QUERY_SET['v_order'] = v_order\n\nif __name__ == '__main__':\n prepare_query_set()\n print (QUERY_SET)\n" } ]
5
Zercos/SubsMan
https://github.com/Zercos/SubsMan
b4bdbb8c5a9da09befcb8eb9d37b228d0f8a78dc
e675f70a74cd9af0ab6131fc1094a216b4411cf6
b121d33f678c13a96ee42a07583f46b67d542326
refs/heads/master
2022-05-18T01:52:25.178529
2022-05-08T16:07:09
2022-05-08T16:07:09
240,886,157
0
0
null
2020-02-16T12:18:21
2021-10-04T17:00:57
2022-05-08T16:07:09
Python
[ { "alpha_fraction": 0.5691288113594055, "alphanum_fraction": 0.591856062412262, "avg_line_length": 38.11111068725586, "blob_id": "3926071aa64757f0f5ff77077eec8c48c349f823", "content_id": "a0dc6beb657045e3c2ff1538eca6b83e194fbfe2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1056, "license_type": "no_license", "max_line_length": 118, "num_lines": 27, "path": "/main/migrations/0001_initial.py", "repo_name": "Zercos/SubsMan", "src_encoding": "UTF-8", "text": "# Generated by Django 3.0.7 on 2020-09-26 15:32\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='Product',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('name', models.CharField(max_length=60, verbose_name='Name')),\n ('description', models.CharField(blank=True, max_length=255, null=True, verbose_name='Description')),\n ('site', models.CharField(max_length=60, verbose_name='Site')),\n ('active', models.BooleanField(default=True, verbose_name='Active')),\n ('product_code', models.CharField(blank=True, max_length=60, null=True, verbose_name='Product code')),\n ('date_updated', models.DateTimeField(auto_now=True)),\n ('date_created', models.DateTimeField(auto_now_add=True)),\n ],\n ),\n ]\n" }, { "alpha_fraction": 0.6794871687889099, "alphanum_fraction": 0.6794871687889099, "avg_line_length": 25, "blob_id": "1f6d49e2235684e152c805845b1275cc5cfda8c5", "content_id": "dc1697980e8d06973faa0f29b3aa5b1dfa2a1162", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 234, "license_type": "no_license", "max_line_length": 82, "num_lines": 9, "path": "/main/forms.py", "repo_name": "Zercos/SubsMan", "src_encoding": "UTF-8", "text": "from django import forms\n\nfrom main.models import Subscription\n\n\nclass SubscriptionForm(forms.ModelForm):\n class Meta:\n model = Subscription\n fields = ['plan', 'user', 'recurring', 'term_start', 'term_end', 'status']\n" }, { "alpha_fraction": 0.5035424828529358, "alphanum_fraction": 0.51113361120224, "avg_line_length": 28.939393997192383, "blob_id": "98da8125e944b6ceaba57a3193ababa402c41084", "content_id": "b27e4697c74c1ef5f05fb0df6c4f962ce3e42d22", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 1976, "license_type": "permissive", "max_line_length": 100, "num_lines": 66, "path": "/static/js/index.js", "repo_name": "Zercos/SubsMan", "src_encoding": "UTF-8", "text": "import './semantic.min.js'\n\n$(document).ready(function () {\n // adds CSRF token header to ajax requests to avoid \"Can't verify CSRF token authenticity\" error\n $.ajaxPrefilter(function (options) {\n if (!options.beforeSend) {\n options.beforeSend = function (xhr) {\n xhr.setRequestHeader('X-CSRF-Token', $('meta[name=\"csrf-token\"]').attr('content'));\n }\n }\n });\n\n $(window).resize(function () {\n if ($(window).width() >= 992) {\n $('.ui.left.sidebar').sidebar('show');\n $('#sidebar-toggle').hide();\n } else {\n $('.ui.left.sidebar').sidebar('hide');\n $('#sidebar-toggle').show();\n }\n }).resize();\n\n $('.ui.left.sidebar').sidebar({\n dimPage: false,\n transition: 'push',\n exclusive: false,\n closable: false\n });\n\n if ($('#sidebar-toggle').length) { // sidebar is present only for logged-in user\n $('.ui.left.sidebar').sidebar('attach events', 
'#sidebar-toggle');\n }\n\n $('.ui.dropdown').dropdown();\n\n // basket icon on top menu\n $('.basket-open').click(() => {\n $('.basket-container').slideToggle();\n });\n\n $('.basket-close').click(() => {\n $('.basket-container').slideUp();\n });\n\n if ($('body').is('.subscriptions.create,.subscriptions.new,.users.new_subscription')) {\n $('.basket-container').show();\n $('.basket-close').hide();\n $('.basket-checkout').hide();\n $('.basket-delete').hide();\n } else {\n $('.basket-container').hide();\n $('.basket-close').show();\n $('.basket-checkout').show();\n $('.basket-delete').show();\n }\n\n $('.basket-delete').click(() => {\n document.cookie = 'lc_basket=; Path=/; Expires=Thu, 01 Jan 1970 00:00:01 GMT;';\n location.reload();\n });\n\n // trigger add-card form\n $('.add-card').click(() => {\n $('.stripe-button-el').trigger('click');\n });\n});\n" }, { "alpha_fraction": 0.6772068738937378, "alphanum_fraction": 0.6772068738937378, "avg_line_length": 33.5, "blob_id": "571f1bea5626ae72431bda044d0dd1998cefe494", "content_id": "261ed62b46a786b806954d2bb4219f6a03", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 759, "license_type": "no_license", "max_line_length": 93, "num_lines": 22, "path": "/main/tests/test_signals.py", "repo_name": "Zercos/SubsMan", "src_encoding": "UTF-8", "text": "from decimal import Decimal\n\nfrom django.core.files.images import ImageFile\nfrom django.test import TestCase\n\nfrom main import models\nfrom main.tests.factories import ProductFactory\n\n\nclass TestSignal(TestCase):\n    def test_thumbnails_are_generated_on_save(self):\n        product = ProductFactory()\n\n        with open('main/fixtures/cat.jpeg', 'rb') as f:\n            image = models.ProductImage(product=product, image=ImageFile(f, name='cat.jpeg'))\n            image.save()\n        image.refresh_from_db()\n        with open('media/product-thumbnails/cat.jpeg', 'rb') as f:\n            expected_content = f.read()\n            assert image.thumbnail.read() == expected_content\n        image.thumbnail.delete(save=False)\n        image.image.delete(save=False)\n" }, { "alpha_fraction": 0.5444468855857849, "alphanum_fraction": 0.5529232621192932, "avg_line_length": 33.85606002807617, "blob_id": "22312a17a1903e34fd37ff69137d2690fda3e015", "content_id": "a0c03bf96581e04211cb48740078bf268f5c4c07", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "HTML", "length_bytes": 1048, "license_type": "no_license", "max_line_length": 116, "num_lines": 25, "path": "/user/templates/registration/login.html", "repo_name": "Zercos/SubsMan", "src_encoding": "UTF-8", "text": "{% extends 'base.html' %}\n{% block title %}Log in {{ block.super }}{% endblock %}\n{% block content %}\n    <h2 style=\"font-weight: normal\">Log in</h2>\n\n    <form method=\"post\" class=\"ui form\" id=\"user_login\">\n        {% csrf_token %}\n        <div class=\"field\">\n            {{ form.email.label_tag }}\n            <input type=\"email\" class=\"form-control {% if form.email.errors %}error{% endif %}\" id=\"id_email\"\n                   name=\"email\" placeholder=\"Your email\" value=\"{{ form.email.value|default:'' }}\">\n        </div>\n        <div class=\"field\">\n            {{ form.password.label_tag }}\n            <input type=\"password\" class=\"{% if form.password.errors %}error{% endif %}\"\n                   id=\"id_password\" name=\"password\" autocomplete=\"off\">\n        </div>\n\n        <div class=\"actions\">\n            <input type=\"submit\" name=\"commit\" value=\"Log in\" class=\"ui primary button\" data-disable-with=\"Log in\"/>\n        </div>\n    </form>\n    <div class=\"ui divider\"></div>\n    <a href=\"{% url 'user:sign_up' %}\">Sign Up</a>\n{% 
endblock %}\n" }, { "alpha_fraction": 0.7724014520645142, "alphanum_fraction": 0.7759856581687927, "avg_line_length": 26.219512939453125, "blob_id": "20b05b962e072b6b19e8ac7c6299de4825ad04b7", "content_id": "ec4231a7ae8893297eeac15e091eb26c2ef49473", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1122, "license_type": "no_license", "max_line_length": 105, "num_lines": 41, "path": "/README.md", "repo_name": "Zercos/SubsMan", "src_encoding": "UTF-8", "text": "# SubsMan\nSubsMan is a web application: a licence and subscription manager for SaaS\napplications\n\n## Purpose\nThe purpose of SubsMan is to provide a way to manage the subscriptions and licences of a company's products.\n\nThe main goal of this project was the creation\nof an example web application, “SubsMan”. As a web application, the system avoids\nthe multi-platform problem and is available to\nanyone with internet access.\n\nCollecting the management of all of a company's\nproduct subscriptions in one place should solve many of these problems, giving more\nflexibility and a more convenient user experience.\n\n## Configuration (development)\n\n* Clone this repository: `git clone https://github.com/Zercos/SubsMan.git`\n\n* Go to the project directory and install the development requirements\n```\ncd SubsMan && pipenv install --dev && pipenv shell\n```\n\n* Create .env from the example config and fill in your environment variables\n```\ncp config/.env.example config/.env\n```\n\n* Run the development server:\n\n```\n./manage.py runserver_plus\n```\n\n## System dependencies\n\nPython: *v3.8*\n\nPostgreSQL: *v11*\n" }, { "alpha_fraction": 0.7103825211524963, "alphanum_fraction": 0.7194899916648865, "avg_line_length": 33.3125, "blob_id": "df9de72d70bb9534e6ff5922f4b1efc84579bd68", "content_id": "ff2f577220c33709c6edfb1c793be1039e5f99f7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 549, "license_type": "no_license", "max_line_length": 60, "num_lines": 16, "path": "/main/tests/test_models.py", "repo_name": "Zercos/SubsMan", "src_encoding": "UTF-8", "text": "from django.test import TestCase\n\nfrom main.models import Product, Plan\nfrom main.tests.factories import ProductFactory, PlanFactory\n\n\nclass TestModels(TestCase):\n    def test_product_manager(self):\n        ProductFactory.create_batch(2, active=True)\n        ProductFactory.create(active=False)\n        self.assertEqual(2, len(Product.objects.active()))\n\n    def test_plan_get_active(self):\n        PlanFactory.create_batch(3, active=True)\n        PlanFactory.create_batch(2, active=False)\n        self.assertEqual(3, Plan.objects.active().count())\n" }, { "alpha_fraction": 0.5943852663040161, "alphanum_fraction": 0.6239109635353088, "avg_line_length": 56.38888931274414, "blob_id": "677a59e01367169b5b18cfe770887c56769b621e", "content_id": "9de00f22c099c4ae87b6aea15211cdc5d4545e2d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2066, "license_type": "no_license", "max_line_length": 144, "num_lines": 36, "path": "/bplanner/migrations-bak/0017_auto_20180726_0939.py", "repo_name": "Zercos/SubsMan", "src_encoding": "UTF-8", "text": "# Generated by Django 3.0.7 on 2020-09-24 09:40\n\nfrom django.conf import settings\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n    dependencies = [\n        ('user', '0002_auto_20200809_0946'),\n    ]\n\n    operations = [\n        migrations.CreateModel(\n 
name='Address',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('address1', models.CharField(max_length=120, verbose_name='Address')),\n ('address2', models.CharField(blank=True, max_length=120, null=True, verbose_name='Address')),\n ('city', models.CharField(max_length=60, verbose_name='City')),\n ('postcode', models.CharField(max_length=10, verbose_name='Postcode')),\n ('country', models.CharField(max_length=60, verbose_name='Country')),\n ('phone', models.CharField(blank=True, max_length=30, null=True, verbose_name='Phone number')),\n ('is_billing', models.BooleanField(default=True)),\n ('billing_address1', models.CharField(blank=True, max_length=120, null=True, verbose_name='Billing address')),\n ('billing_address2', models.CharField(blank=True, max_length=120, null=True, verbose_name='Billing address')),\n ('billing_city', models.CharField(blank=True, max_length=60, null=True, verbose_name='Billing city')),\n ('billing_postcode', models.CharField(blank=True, max_length=10, null=True, verbose_name='Billing postcode')),\n ('billing_country', models.CharField(blank=True, max_length=60, null=True, verbose_name='Billing country')),\n ('date_updated', models.DateTimeField(auto_now=True)),\n ('date_created', models.DateTimeField(auto_now_add=True)),\n ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='addresses', to=settings.AUTH_USER_MODEL)),\n ],\n ),\n ]\n" }, { "alpha_fraction": 0.7347561120986938, "alphanum_fraction": 0.7423780560493469, "avg_line_length": 37.588233947753906, "blob_id": "2258e25df2059a16c06a98948189b35e65ec36af", "content_id": "bed6ab6936d15ce925151c483d80ac2554fcca5b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 656, "license_type": "no_license", "max_line_length": 96, "num_lines": 17, "path": "/main/tests/test_services.py", "repo_name": "Zercos/SubsMan", "src_encoding": "UTF-8", "text": "import datetime as ddt\n\nfrom django.test.testcases import TestCase\n\nfrom main.services import disactivate_expired_subscriptions\nfrom main.tests.factories import SubscriptionFactory\n\n\nclass TestServices(TestCase):\n def test_disactivate_expired_subscriptions(self):\n SubscriptionFactory.create_batch(4, term_end=ddt.datetime.now() - ddt.timedelta(days=1))\n SubscriptionFactory.create_batch(2, term_end=ddt.datetime.now())\n\n with self.assertLogs('main.services', level='INFO') as cm:\n disactivated = disactivate_expired_subscriptions()\n self.assertEqual(4, disactivated)\n self.assertGreater(len(cm.output), 0)\n" }, { "alpha_fraction": 0.5562872290611267, "alphanum_fraction": 0.575101912021637, "avg_line_length": 37.89024353027344, "blob_id": "f6c8f7c0296028e667bcae641f07bfbda1bb12c6", "content_id": "9be39c9ce8e954ca335e7aa58041b99639db183d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3189, "license_type": "no_license", "max_line_length": 100, "num_lines": 82, "path": "/user/tests/test_views.py", "repo_name": "Zercos/SubsMan", "src_encoding": "UTF-8", "text": "from django.test.testcases import TestCase\nfrom django.urls import reverse\n\nfrom user.models import User, Address\nfrom user.tests.factories import UserFactory\n\n\nclass TestViews(TestCase):\n def test_not_valid_user_registration(self):\n post_data = {\n 'email': '[email protected]',\n 'password1': 'somepassword',\n 'first_name': 'John',\n 'last_name': 'Newt'\n }\n response = 
self.client.post(reverse('user:sign_up'), post_data)\n self.assertEqual(response.status_code, 200)\n self.assertFalse(User.objects.filter(email='[email protected]').exists())\n self.assertFalse(Address.objects.filter(user__email='[email protected]').exists())\n\n def test_valid_registration(self):\n params = {\n 'email': '[email protected]',\n 'password1': 'somepassword',\n 'password2': 'somepassword',\n 'first_name': 'John',\n 'last_name': 'Newt',\n 'country': 'ua',\n 'phone': '34324324',\n 'postcode': '43222',\n 'city': 'ad',\n 'address1': 'st. Green 1',\n 'is_billing': True\n }\n response = self.client.post(reverse('user:sign_up'), params)\n self.assertEqual(response.status_code, 302)\n self.assertTrue(User.objects.filter(email='[email protected]').exists())\n self.assertTrue(Address.objects.filter(user__email='[email protected]').exists())\n\n def test_valid_registration_with_billing_address(self):\n user_params = {\n 'email': '[email protected]',\n 'password1': 'somepassword',\n 'password2': 'somepassword',\n 'first_name': 'John',\n 'last_name': 'Newt',\n 'country': 'ua',\n 'phone': '34324324',\n 'postcode': '43222',\n 'city': 'ad',\n 'address1': 'st. Green 1',\n 'is_billing': False,\n 'billing_country': 'ua',\n 'billing_postcode': '20222',\n 'billing_city': 'Kyiv',\n 'billing_address1': 'st. Green 2',\n }\n response = self.client.post(reverse('user:sign_up'), user_params)\n self.assertEqual(response.status_code, 302)\n self.assertTrue(User.objects.filter(email='[email protected]').exists())\n self.assertTrue(Address.objects.filter(user__email='[email protected]').exists())\n self.assertEqual(Address.objects.filter(user__email='[email protected]').first().billing_city, 'Kyiv')\n\n def test_account_page(self):\n user = UserFactory()\n self.client.force_login(user)\n response = self.client.get(reverse('user:account'))\n self.assertEqual(response.status_code, 200)\n self.assertEqual(user, response.context['user'])\n self.assertEqual(user.addresses.first(), response.context['address'])\n\n def test_edit_account_page(self):\n user = UserFactory()\n self.client.force_login(user)\n response = self.client.get(reverse('user:account_edit'))\n self.assertEqual(200, response.status_code)\n\n def test_edit_address_page(self):\n user = UserFactory()\n self.client.force_login(user)\n response = self.client.get(reverse('user:address_edit'))\n self.assertEqual(200, response.status_code)\n" }, { "alpha_fraction": 0.6738712787628174, "alphanum_fraction": 0.6810758709907532, "avg_line_length": 41.48979568481445, "blob_id": "bef4956e61ba4e718de286928a1902290202c64d", "content_id": "ad8aa71f7759a90694880d807de6962dcc258652", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2082, "license_type": "no_license", "max_line_length": 108, "num_lines": 49, "path": "/main/tests/test_views.py", "repo_name": "Zercos/SubsMan", "src_encoding": "UTF-8", "text": "import datetime as ddt\n\nfrom dateutil.relativedelta import relativedelta\nfrom django.test.testcases import TestCase\nfrom django.urls import reverse\n\nfrom main.tests.factories import PlanFactory, ProductFactory\nfrom user.tests.factories import UserFactory\n\n\nclass TestViews(TestCase):\n def test_home_page(self):\n user = UserFactory()\n self.client.force_login(user)\n response = self.client.get(reverse('main:home'))\n self.assertEqual(response.status_code, 200)\n\n def test_plan_list_view(self):\n product = ProductFactory()\n PlanFactory.create_batch(4, product=product)\n user = UserFactory()\n 
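# PlanListView requires an authenticated user (LoginRequiredMixin), so log the test client in.\n        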
self.client.force_login(user)\n response = self.client.get(reverse('main:plans', kwargs={'product_id': product.id}))\n self.assertEqual(200, response.status_code)\n self.assertEqual(3, len(response.context['page_obj']))\n self.assertEqual(product.plans.active().all()[0], response.context['plan_list'][0])\n\n def test_add_to_basket(self):\n user = UserFactory()\n product = ProductFactory()\n plan = PlanFactory(product=product)\n\n self.client.force_login(user)\n self.client.get(reverse('main:add_to_basket'), {'plan_id': plan.id})\n self.assertTrue(user.basket_set.filter(user=user).exists())\n self.assertTrue(user.basket_set.filter(user=user).first().basketitem_set.filter(plan=plan).exists())\n\n def test_create_subscription(self):\n user = UserFactory()\n product = ProductFactory()\n plan = PlanFactory(product=product, period=4, period_unit='months')\n term_end = ddt.datetime.now() + relativedelta(months=4)\n\n self.client.force_login(user)\n response = self.client.get(reverse('main:create_subscription', kwargs={'plan_id': plan.id}))\n self.assertEqual(302, response.status_code)\n self.assertTrue(plan.subscriptions.exists())\n self.assertEqual(plan.subscriptions.first().term_start.date(), ddt.date.today())\n self.assertEqual(plan.subscriptions.first().term_end.date(), term_end.date())\n" }, { "alpha_fraction": 0.6587430834770203, "alphanum_fraction": 0.6600455641746521, "avg_line_length": 36.91358184814453, "blob_id": "704e19e6c7d0557b140a32b09e3dc430ed03e2c9", "content_id": "e0c20cb6a58266905b9cbac64fa67620297853f3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3071, "license_type": "no_license", "max_line_length": 117, "num_lines": 81, "path": "/user/views.py", "repo_name": "Zercos/SubsMan", "src_encoding": "UTF-8", "text": "from django.contrib.auth import authenticate, login\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom django.db import transaction\nfrom django.http import HttpResponseRedirect\nfrom django.urls import reverse_lazy\nfrom django.views.generic import TemplateView, UpdateView\nfrom django_registration import signals\nfrom django_registration.backends.one_step.views import RegistrationView\n\nfrom user.forms import RegistrationForm, AddressForm\nfrom user.models import User, Address\n\n\nclass CustomRegistrationView(RegistrationView):\n form_class = RegistrationForm\n success_url = '/'\n\n def get_context_data(self, **kwargs):\n if 'address_form' not in kwargs:\n kwargs['address_form'] = AddressForm()\n return super().get_context_data(**kwargs)\n\n def register(self, form):\n new_user, form = form\n new_user = authenticate(\n **{\n User.USERNAME_FIELD: new_user.get_username(),\n \"password\": form.cleaned_data[\"password1\"],\n }\n )\n login(self.request, new_user)\n signals.user_registered.send(sender=self.__class__, user=new_user, request=self.request)\n return new_user\n\n def form_valid(self, form: RegistrationForm):\n address_params: dict = {k: self.request.POST.get(k) for k in AddressForm.base_fields if k != 'user'}\n with transaction.atomic():\n sid = transaction.savepoint()\n new_user: User = form.save()\n address_params['user'] = new_user.id\n address_form: AddressForm = AddressForm(address_params)\n if address_form.is_valid():\n new_user = self.register((new_user, form))\n address_form.save()\n else:\n transaction.savepoint_rollback(sid)\n if address_form.is_valid():\n return HttpResponseRedirect(self.get_success_url(new_user))\n else:\n return 
self.render_to_response(self.get_context_data(form=form, address_form=address_form))\n\n\nclass AccountView(LoginRequiredMixin, TemplateView):\n template_name = 'account.html'\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['user'] = self.request.user\n context['address'] = self.request.user.addresses.first()\n return context\n\n\nclass AccountUpdateView(LoginRequiredMixin, UpdateView):\n model = User\n template_name = 'account_edit.html'\n fields = ['first_name', 'last_name']\n success_url = reverse_lazy('user:account')\n\n def get_object(self, queryset=None):\n return self.request.user\n\n\nclass AddressUpdateView(LoginRequiredMixin, UpdateView):\n model = Address\n template_name = 'address_edit.html'\n fields = ['address1', 'address2', 'city', 'postcode', 'country', 'phone', 'billing_address1', 'billing_address2',\n 'billing_city', 'billing_country', 'billing_postcode']\n success_url = reverse_lazy('user:account')\n\n def get_object(self, queryset=None):\n return self.request.user.addresses.first()\n" }, { "alpha_fraction": 0.5569307208061218, "alphanum_fraction": 0.5742574334144592, "avg_line_length": 39.400001525878906, "blob_id": "f8b75f6723949e719aa12d0225ca1ff6e47094f8", "content_id": "f213be19a6043d132cc5b4115ab6968c4d65aec5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "HTML", "length_bytes": 404, "license_type": "no_license", "max_line_length": 109, "num_lines": 10, "path": "/main/templates/_basket.html", "repo_name": "Zercos/SubsMan", "src_encoding": "UTF-8", "text": "<div class=\"ui segment\">\n <div class=\"ui three column grid basket\">\n <div style=\"position: absolute; right: 20px; top: 10px; font-size: 18px; cursor: pointer; z-index: 2\"\n class=\"basket-close\">x\n </div>\n {% for item in basket.basketitem_set.all %}\n {% include '_plan_card.html' with plan=item.plan basket_item=item %}\n {% endfor %}\n </div>\n</div>\n" }, { "alpha_fraction": 0.706250011920929, "alphanum_fraction": 0.706250011920929, "avg_line_length": 49, "blob_id": "7b08574fd0e77697d1224dd9afb4518eddba3272", "content_id": "584271ca411f431a235fbb7f4c4042c899b330ea", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 800, "license_type": "no_license", "max_line_length": 106, "num_lines": 16, "path": "/main/urls.py", "repo_name": "Zercos/SubsMan", "src_encoding": "UTF-8", "text": "from django.urls import path\n\nfrom main import views\n\napp_name = 'main'\n\nurlpatterns = [\n path('', views.HomeView.as_view(), name='home'),\n path('product/<int:product_id>/plans', views.PlanListView.as_view(), name='plans'),\n path('add-to-basket/', views.add_to_basket, name='add_to_basket'),\n path('delete-from-basket/<int:basket_item_id>/', views.delete_basket_item, name='delete_from_basket'),\n path('subscriptions/new/', views.SubscriptionNewView.as_view(), name='subscription_new'),\n path('subscription/create/<int:plan_id>/', views.create_subscription, name='create_subscription'),\n path('subscriptions/', views.SubscriptionListView.as_view(), name='subscriptions'),\n path('subscriptions/<int:pk>/', views.SubscriptionUpdateView.as_view(), name='subscription_update'),\n]\n" }, { "alpha_fraction": 0.8295454382896423, "alphanum_fraction": 0.8295454382896423, "avg_line_length": 28.33333396911621, "blob_id": "adfa897c796c7e4b5e2c3ad28073899bb57534c4", "content_id": "b6e5df0e817740ecb681d5a8f465da14823283e7", "detected_licenses": [], "is_generated": false, "is_vendor": false, 
"language": "Python", "length_bytes": 264, "license_type": "no_license", "max_line_length": 75, "num_lines": 9, "path": "/main/admin.py", "repo_name": "Zercos/SubsMan", "src_encoding": "UTF-8", "text": "from django.contrib import admin\n\nfrom main.models import Product, ProductImage, Plan, PlanItem, Subscription\n\nadmin.site.register(Product)\nadmin.site.register(ProductImage)\nadmin.site.register(Plan)\nadmin.site.register(PlanItem)\nadmin.site.register(Subscription)\n" }, { "alpha_fraction": 0.6913896203041077, "alphanum_fraction": 0.6982097029685974, "avg_line_length": 33.16504669189453, "blob_id": "db80ff4752d509e538aa74a79f865427283f2bd9", "content_id": "9cf5e4028fc1ce7a472a1de29a956d2a1c52988c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3519, "license_type": "no_license", "max_line_length": 88, "num_lines": 103, "path": "/main/views.py", "repo_name": "Zercos/SubsMan", "src_encoding": "UTF-8", "text": "import datetime as ddt\n\nfrom dateutil.relativedelta import relativedelta\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom django.http import HttpResponseRedirect, Http404\nfrom django.shortcuts import get_object_or_404\nfrom django.urls import reverse, reverse_lazy\nfrom django.views.generic import TemplateView, ListView, UpdateView\n\nfrom main.forms import SubscriptionForm\nfrom main.models import Product, Plan, Basket, BasketItem, Subscription\n\n\nclass HomeView(LoginRequiredMixin, TemplateView):\n template_name = 'home.html'\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['products'] = Product.objects.active().all()\n context['subscriptions'] = self.request.user.subscriptions.all()[:5]\n return context\n\n\nclass PlanListView(LoginRequiredMixin, ListView):\n model = Plan\n paginate_by = 3\n template_name = 'plan_list.html'\n\n def get_queryset(self):\n return Product.objects.active().get(pk=self.kwargs['product_id']).plans.active()\n\n\n@login_required\ndef add_to_basket(request):\n plan_id = request.GET.get('plan_id')\n plan = get_object_or_404(Plan, pk=plan_id)\n basket = request.basket\n if not basket:\n basket = Basket.objects.create(user=request.user)\n request.session['basket_id'] = basket.id\n basket_item, created = BasketItem.objects.get_or_create(basket=basket, plan=plan)\n if not created:\n basket_item.quantity += 1\n basket_item.save()\n return HttpResponseRedirect(reverse('main:home'))\n\n\n@login_required\ndef delete_basket_item(request, basket_item_id):\n basket = request.basket\n if not basket:\n raise Http404('No basket provided')\n basket_item = get_object_or_404(BasketItem, pk=basket_item_id)\n basket_item.delete()\n return HttpResponseRedirect(reverse('main:home'))\n\n\nclass SubscriptionNewView(LoginRequiredMixin, TemplateView):\n template_name = 'subscription_new.html'\n\n def get_context_data(self, **kwargs):\n ctx = super().get_context_data(**kwargs)\n plan_id = self.request.GET.get('plan_id')\n plan = get_object_or_404(Plan, pk=plan_id)\n ctx['plan'] = plan\n return ctx\n\n\n@login_required\ndef create_subscription(request, plan_id):\n plan = get_object_or_404(Plan, pk=plan_id)\n plan_terms = {plan.period_unit: plan.period}\n this_time = ddt.datetime.now()\n # noinspection PyTypeChecker\n term_end = this_time + relativedelta(**plan_terms)\n subscription_data = dict(plan=plan, user=request.user, term_start=this_time,\n term_end=term_end, status='New')\n 
subscription_form = SubscriptionForm(subscription_data)\n if subscription_form.is_valid():\n subscription_form.save()\n return HttpResponseRedirect(reverse('main:home'))\n\n\nclass SubscriptionListView(LoginRequiredMixin, ListView):\n template_name = 'subscriptions.html'\n model = Subscription\n\n def get_queryset(self):\n queryset = super().get_queryset()\n return queryset.filter(user=self.request.user)\n\n\nclass SubscriptionUpdateView(LoginRequiredMixin, UpdateView):\n model = Subscription\n template_name = 'subscription_update.html'\n fields = ['recurring', 'plan']\n success_url = reverse_lazy('main:subscriptions')\n\n def get_context_data(self, **kwargs):\n ctx = super().get_context_data(**kwargs)\n ctx['plans'] = self.object.plan.product.plans.all()\n return ctx\n" }, { "alpha_fraction": 0.7151650786399841, "alphanum_fraction": 0.7263570427894592, "avg_line_length": 29.288135528564453, "blob_id": "074753c201ce737fb3adb952bd2ccb7ac465d17b", "content_id": "a9f2f9c9ee639bc65821e302f1fab1f82d7a15e4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1787, "license_type": "no_license", "max_line_length": 92, "num_lines": 59, "path": "/main/tests/factories.py", "repo_name": "Zercos/SubsMan", "src_encoding": "UTF-8", "text": "import datetime as ddt\nfrom datetime import timezone\n\nimport factory.fuzzy\n\nfrom main import models\nfrom user.tests.factories import UserFactory\n\n\nclass ProductFactory(factory.django.DjangoModelFactory):\n name = factory.Sequence(lambda x: f'Product {x}')\n description = factory.fuzzy.FuzzyText()\n site = factory.faker.Faker('url')\n product_code = factory.Sequence(lambda x: f'product_{x}')\n\n class Meta:\n model = models.Product\n\n\nclass PlanFactory(factory.django.DjangoModelFactory):\n name = factory.Sequence(lambda x: f'Plan {x}')\n description = factory.fuzzy.FuzzyText()\n product = factory.SubFactory(ProductFactory)\n price = factory.fuzzy.FuzzyDecimal(0.01)\n period = factory.fuzzy.FuzzyInteger(1)\n period_unit = factory.fuzzy.FuzzyChoice(models.Plan.PERIOD_UNITS, getter=lambda x: x[0])\n active = True\n currency_code = factory.fuzzy.FuzzyChoice(models.Plan.CURRENCIES, getter=lambda x: x[0])\n recurring = False\n\n class Meta:\n model = models.Plan\n\n\nclass BasketFactory(factory.django.DjangoModelFactory):\n user = factory.SubFactory(UserFactory)\n\n class Meta:\n model = models.Basket\n\n\nclass BasketItemFactory(factory.django.DjangoModelFactory):\n basket = factory.SubFactory(BasketFactory)\n plan = factory.SubFactory(PlanFactory)\n\n class Meta:\n model = models.BasketItem\n\n\nclass SubscriptionFactory(factory.django.DjangoModelFactory):\n status = 'Active'\n plan = factory.SubFactory(PlanFactory)\n user = factory.SubFactory(UserFactory)\n term_start = factory.fuzzy.FuzzyDateTime(ddt.datetime(2020, 5, 10, tzinfo=timezone.utc))\n term_end = factory.fuzzy.FuzzyDateTime(ddt.datetime(2020, 5, 12, tzinfo=timezone.utc))\n active = True\n\n class Meta:\n model = models.Subscription\n" }, { "alpha_fraction": 0.5962059497833252, "alphanum_fraction": 0.5989159941673279, "avg_line_length": 25.35714340209961, "blob_id": "af9b21aab81ecdf437c7ae0fb7d542887a348b6c", "content_id": "dd74e9ada810b771c10e0b68cc441959f65bf8f2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 369, "license_type": "no_license", "max_line_length": 45, "num_lines": 14, "path": "/user/tests/test_forms.py", "repo_name": "Zercos/SubsMan", "src_encoding": "UTF-8", "text": 
"from django.test.testcases import TestCase\n\nfrom user.forms import RegistrationForm\n\n\nclass TestForms(TestCase):\n def test_invalid_registration_form(self):\n data = {\n 'email': '[email protected]',\n 'password1': 'somepassword',\n 'last_name': 'Newt'\n }\n form = RegistrationForm(data)\n self.assertFalse(form.is_valid())\n" }, { "alpha_fraction": 0.7021192312240601, "alphanum_fraction": 0.7102396488189697, "avg_line_length": 41.42856979370117, "blob_id": "144e8771e087021fcc3eca9cd59fa77e4d629400", "content_id": "39fa8dfdf3612e3b9c800efe6625e735cbb77c7b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5049, "license_type": "no_license", "max_line_length": 105, "num_lines": 119, "path": "/main/models.py", "repo_name": "Zercos/SubsMan", "src_encoding": "UTF-8", "text": "from django.core.validators import MinValueValidator\nfrom django.db import models\n\nfrom user.models import User\n\n\nclass ActiveManager(models.Manager):\n def active(self):\n return self.filter(active=True)\n\n\nclass ProductManager(ActiveManager):\n pass\n\n\nclass Product(models.Model):\n name = models.CharField('Name', null=False, blank=False, max_length=60)\n description = models.CharField('Description', null=True, blank=True, max_length=255)\n site = models.CharField('Site', null=False, blank=False, max_length=60)\n active = models.BooleanField('Active', null=False, default=True)\n product_code = models.CharField('Product code', null=True, blank=True, max_length=60)\n date_updated = models.DateTimeField(auto_now=True)\n date_created = models.DateTimeField(auto_now_add=True, null=False)\n\n objects = ProductManager()\n\n def __str__(self):\n return f'<Product: {self.name}>'\n\n\nclass ProductImage(models.Model):\n product = models.ForeignKey(Product, on_delete=models.CASCADE, related_name='images')\n image = models.ImageField(upload_to='product-images')\n thumbnail = models.ImageField(upload_to='product-thumbnails', null=True, blank=True)\n\n\nclass PlanManager(ActiveManager):\n pass\n\n\nclass Plan(models.Model):\n PERIOD_UNITS = (('days', 'Day'), ('weeks', 'Week'), ('months', 'Month'), ('years', 'Year'))\n CURRENCIES = (('us', 'USD'), ('eur', 'EUR'), ('pln', 'PLN'))\n name = models.CharField('Plan name', null=False, blank=False, max_length=120)\n description = models.CharField('Description', null=True, blank=True, max_length=255)\n product = models.ForeignKey(Product, on_delete=models.CASCADE, related_name='plans', db_index=True)\n price = models.DecimalField('Price', null=False, blank=False, decimal_places=2, max_digits=8)\n period = models.IntegerField('Period', null=True, blank=True)\n period_unit = models.CharField('Period unit', choices=PERIOD_UNITS, max_length=20)\n active = models.BooleanField('Active', null=False, default=True, blank=True)\n currency_code = models.CharField('Currency', choices=CURRENCIES, max_length=10)\n recurring = models.BooleanField('Recurring', null=False, blank=True, default=False)\n date_updated = models.DateTimeField(auto_now=True, editable=False)\n date_created = models.DateTimeField(auto_now_add=True, null=False)\n\n objects = PlanManager()\n\n class Meta:\n ordering = ['id']\n\n def __str__(self):\n return f'<Plan: {self.name}>'\n\n\nclass PlanItem(models.Model):\n name = models.CharField('Item name', null=False, blank=False, max_length=120)\n description = models.CharField('Description', null=True, blank=True, max_length=255)\n value = models.CharField('Value', null=False, blank=False, max_length=255)\n value_unit 
= models.CharField('Value unit', null=False, blank=False, max_length=120)\n plan = models.ForeignKey(Plan, on_delete=models.CASCADE, db_index=True, related_name='items')\n date_updated = models.DateTimeField(auto_now=True, editable=False)\n date_created = models.DateTimeField(auto_now_add=True, null=False)\n\n def __str__(self):\n return f'<PlanItem: {self.name}>'\n\n\nclass SubscriptionManager(ActiveManager):\n pass\n\n\nclass Subscription(models.Model):\n status = models.CharField('Status', null=True, blank=True, max_length=30)\n user = models.ForeignKey(User, on_delete=models.PROTECT, related_name='subscriptions', db_index=True)\n plan = models.ForeignKey(Plan, on_delete=models.PROTECT, related_name='subscriptions', db_index=True)\n recurring = models.BooleanField('Recurring', null=False, blank=True, default=False)\n term_start = models.DateTimeField('Term start', null=True, blank=True)\n term_end = models.DateTimeField('Term end', null=True, blank=True)\n active = models.BooleanField('Active', null=False, default=True, blank=True)\n date_updated = models.DateTimeField(auto_now=True, editable=False)\n date_created = models.DateTimeField(auto_now_add=True, null=False)\n\n objects = SubscriptionManager()\n\n def __str__(self):\n return f'<Subscription of plan:{self.plan.name}>'\n\n\nclass Basket(models.Model):\n OPEN = 10\n SUBMITTED = 20\n STATUSES = ((OPEN, 'Open'), (SUBMITTED, 'Submitted'))\n\n user = models.ForeignKey(User, on_delete=models.CASCADE, blank=True, null=True)\n status = models.IntegerField(choices=STATUSES, default=OPEN, null=True, blank=True)\n date_updated = models.DateTimeField(auto_now=True, editable=False)\n date_created = models.DateTimeField(auto_now_add=True, null=False)\n\n @property\n def count(self):\n return sum(i.quantity for i in self.basketitem_set.all())\n\n\nclass BasketItem(models.Model):\n basket = models.ForeignKey(Basket, on_delete=models.CASCADE)\n plan = models.ForeignKey(Plan, on_delete=models.CASCADE)\n quantity = models.PositiveIntegerField(default=1, validators=[MinValueValidator(1)])\n date_updated = models.DateTimeField(auto_now=True, editable=False, null=True)\n date_created = models.DateTimeField(auto_now_add=True, null=False)\n" }, { "alpha_fraction": 0.5901876091957092, "alphanum_fraction": 0.6176046133041382, "avg_line_length": 27.875, "blob_id": "8426663a8e8ea9baadbafe52de5d9222a9432062", "content_id": "143d02c90937305d44df3347c4ee79358f60ba16", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 693, "license_type": "no_license", "max_line_length": 123, "num_lines": 24, "path": "/main/migrations/0003_auto_20201001_2044.py", "repo_name": "Zercos/SubsMan", "src_encoding": "UTF-8", "text": "# Generated by Django 3.0.7 on 2020-10-01 20:44\n\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('main', '0002_productimage'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='productimage',\n name='product',\n field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='images', to='main.Product'),\n ),\n migrations.AlterField(\n model_name='productimage',\n name='thumbnail',\n field=models.ImageField(blank=True, null=True, upload_to='product-thumbnails'),\n ),\n ]\n" }, { "alpha_fraction": 0.5904936194419861, "alphanum_fraction": 0.6234003901481628, "avg_line_length": 33.1875, "blob_id": "35eca6cdbb20755012160c74f3dde92a20c4b980", "content_id": 
"046e24575dd0e3871baeb85c0953b7a9c6e2c0e9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 547, "license_type": "no_license", "max_line_length": 67, "num_lines": 16, "path": "/main/tests/test_utils.py", "repo_name": "Zercos/SubsMan", "src_encoding": "UTF-8", "text": "from django.test import TestCase\nfrom main.utils import chunk_generator\n\n\nclass TestUtils(TestCase):\n def test_chunk_generator_list(self):\n lst = list(range(9))\n result = list(chunk_generator(lst, 3))\n self.assertEqual(3, len(result))\n self.assertEqual([[0, 1, 2], [3, 4, 5], [6, 7, 8]], result)\n\n def test_chunk_generator_tuple(self):\n seq = tuple(range(9))\n first_chunk = next(chunk_generator(seq, 3))\n self.assertEqual(3, len(first_chunk))\n self.assertEqual((0, 1, 2), first_chunk)\n" }, { "alpha_fraction": 0.548224687576294, "alphanum_fraction": 0.5633280277252197, "avg_line_length": 56.181819915771484, "blob_id": "724b7a29b98f030232346830814b8f45b99bfb86", "content_id": "2342578aa187d2277778078dc7743f4491c80d54", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3774, "license_type": "no_license", "max_line_length": 119, "num_lines": 66, "path": "/main/migrations/0004_plan_planitem_subscription.py", "repo_name": "Zercos/SubsMan", "src_encoding": "UTF-8", "text": "# Generated by Django 3.0.7 on 2020-10-01 20:48\n\nfrom django.conf import settings\nfrom django.db import migrations, models\nimport django.db.models.deletion\nimport main.models\n\n\nclass Migration(migrations.Migration):\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ('main', '0003_auto_20201001_2044'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Plan',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('name', models.CharField(max_length=120, verbose_name='Plan name')),\n ('description', models.CharField(blank=True, max_length=255, null=True, verbose_name='Description')),\n ('price', models.DecimalField(decimal_places=2, max_digits=8, verbose_name='Price')),\n ('period', models.IntegerField(blank=True, null=True, verbose_name='Period')),\n ('period_unit',\n models.CharField(choices=[('day', 'Day'), ('week', 'Week'), ('month', 'Month'), ('year', 'Year')],\n max_length=20, verbose_name='Period unit')),\n ('active', models.BooleanField(blank=True, default=True, verbose_name='Active')),\n ('currency_code',\n models.CharField(choices=[('us', 'USD'), ('eur', 'EUR'), ('pln', 'PLN')], max_length=10,\n verbose_name='Currency')),\n ('recurring', models.BooleanField(blank=True, default=False, verbose_name='Recurring')),\n ('product', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='plans',\n to='main.Product')),\n ],\n bases=[models.Model],\n ),\n migrations.CreateModel(\n name='Subscription',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('status', models.CharField(blank=True, max_length=30, null=True, verbose_name='Status')),\n ('recurring', models.BooleanField(blank=True, default=False, verbose_name='Recurring')),\n ('term_start', models.DateTimeField(blank=True, null=True, verbose_name='Term start')),\n ('term_end', models.DateTimeField(blank=True, null=True, verbose_name='Term end')),\n ('active', models.BooleanField(blank=True, default=True, verbose_name='Active')),\n ('plan', 
models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='subscriptions',\n to='main.Plan')),\n ('user', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='subscriptions',\n to=settings.AUTH_USER_MODEL)),\n ],\n bases=[models.Model],\n ),\n migrations.CreateModel(\n name='PlanItem',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('name', models.CharField(max_length=120, verbose_name='Plan name')),\n ('description', models.CharField(blank=True, max_length=255, null=True, verbose_name='Description')),\n ('value', models.CharField(max_length=255, verbose_name='Value')),\n ('value_unit', models.CharField(max_length=120, verbose_name='Value unit')),\n ('plan',\n models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='items', to='main.Plan')),\n ],\n bases=[models.Model],\n ),\n ]\n" }, { "alpha_fraction": 0.7233502268791199, "alphanum_fraction": 0.7233502268791199, "avg_line_length": 38.400001525878906, "blob_id": "160acdd4e813d0d5117480d729e2d70a29737601", "content_id": "4abefbc332b653ffc30aadc9b62ed197bec4e889", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 788, "license_type": "no_license", "max_line_length": 90, "num_lines": 20, "path": "/user/urls.py", "repo_name": "Zercos/SubsMan", "src_encoding": "UTF-8", "text": "from typing import List\n\nfrom django.contrib.auth.views import LoginView\nfrom django.urls import include, path\nfrom django.urls.resolvers import URLPattern\n\nfrom user import forms\nfrom user import views\n\napp_name = 'user'\n\nurlpatterns: List[URLPattern] = [\n path('sign_up/', views.CustomRegistrationView.as_view(), name='sign_up'),\n path('login/', LoginView.as_view(form_class=forms.AuthenticationForm), name='login'),\n path('', include('django_registration.backends.one_step.urls')),\n path('', include('django.contrib.auth.urls')),\n path('account/', views.AccountView.as_view(), name='account'),\n path('account/edit/', views.AccountUpdateView.as_view(), name='account_edit'),\n path('account/address/edit/', views.AddressUpdateView.as_view(), name='address_edit'),\n]\n" }, { "alpha_fraction": 0.7014341354370117, "alphanum_fraction": 0.7027379274368286, "avg_line_length": 28.5, "blob_id": "15d6925cf02df43521dab18fcdf8bd4a6893304e", "content_id": "6c61062c87d3baa2ff59ffd26cbb30d22448bd6a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 767, "license_type": "no_license", "max_line_length": 77, "num_lines": 26, "path": "/user/tests/factories.py", "repo_name": "Zercos/SubsMan", "src_encoding": "UTF-8", "text": "import factory.fuzzy\n\nfrom user import models\n\n\nclass UserFactory(factory.django.DjangoModelFactory):\n email = factory.Sequence(lambda x: f'user{x}@mail.com')\n password = factory.PostGenerationMethodCall('set_password', 'password')\n first_name = factory.faker.Faker('first_name')\n last_name = factory.faker.Faker('last_name')\n\n class Meta:\n model = models.User\n\n\nclass AddressFactory(factory.django.DjangoModelFactory):\n user = factory.SubFactory(UserFactory)\n address1 = factory.faker.Faker('address')\n phone = factory.faker.Faker('phone_number')\n city = factory.faker.Faker('city')\n country = factory.faker.Faker('country')\n postcode = factory.Faker('postcode')\n is_billing = True\n\n class Meta:\n model = models.Address\n" }, { "alpha_fraction": 0.7552816867828369, "alphanum_fraction": 0.7570422291755676, 
"avg_line_length": 24.81818199157715, "blob_id": "6800224fca5b3c0da7cfa7153f1e6146c4d0b3f8", "content_id": "b1adf2979dcd8fa9e5878f826a23824e4384cd85", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 568, "license_type": "no_license", "max_line_length": 67, "num_lines": 22, "path": "/main/tasks.py", "repo_name": "Zercos/SubsMan", "src_encoding": "UTF-8", "text": "import logging\n\nfrom celery import shared_task\nfrom celery.schedules import crontab\n\nfrom config.celery import app\nfrom main.services import disactivate_expired_subscriptions\n\nlog = logging.getLogger(__name__)\n\[email protected]_after_finalize.connect\ndef setup_periodic_tasks(sender, **kwargs):\n sender.add_periodic_task(\n crontab(hour=6),\n disactivate_expired_subscriptions_task.s(),\n )\n\n\n@shared_task\ndef disactivate_expired_subscriptions_task():\n log.info('Start the task to disactivate expired subscriptions')\n disactivate_expired_subscriptions()\n" }, { "alpha_fraction": 0.7402061820030212, "alphanum_fraction": 0.7402061820030212, "avg_line_length": 31.33333396911621, "blob_id": "b80444b5cec75460e93b8fd1f0be98e7a1d4420c", "content_id": "f3c1707d570b6c1091df463f9865c8173ccbe9b7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 485, "license_type": "no_license", "max_line_length": 89, "num_lines": 15, "path": "/user/tasks.py", "repo_name": "Zercos/SubsMan", "src_encoding": "UTF-8", "text": "import logging\n\nfrom celery import shared_task\nfrom django.core.mail import send_mail\nfrom django.conf import settings\n\nlogger = logging.getLogger(__name__)\n\n\n@shared_task\ndef send_welcome_email_to_user(email):\n logger.info(f'Sending signup email for {email}')\n message = 'Welcome to SubsMan, the subscription management system.'\n send_mail(subject='SubsMan welcome', message=message, from_email=settings.EMAIL_FROM,\n recipient_list=(email,), fail_silently=True)\n" }, { "alpha_fraction": 0.6299136877059937, "alphanum_fraction": 0.6299136877059937, "avg_line_length": 42.45833206176758, "blob_id": "1b111ff79658d7f8de57967e1c0c12b76e449c41", "content_id": "8a8a25181934d25f83128ec9d97027cbefad9b2d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1043, "license_type": "no_license", "max_line_length": 110, "num_lines": 24, "path": "/user/tests/test_models.py", "repo_name": "Zercos/SubsMan", "src_encoding": "UTF-8", "text": "from django.test import TestCase\n\nfrom user.models import User, Address\nfrom user.tests.factories import AddressFactory\n\n\nclass TestModels(TestCase):\n def test_create_user(self):\n test_user = User.objects.create_user(email='[email protected]', password='Password1!', first_name='Name',\n last_name='Smith')\n self.assertTrue(User.objects.filter(email='[email protected]').exists())\n self.assertFalse(test_user.is_staff)\n self.assertFalse(test_user.is_superuser)\n\n def test_create_superuser(self):\n super_user = User.objects.create_superuser(email='[email protected]', password='Password1!', first_name='John',\n last_name='Smith')\n self.assertTrue(User.objects.filter(email='[email protected]').exists())\n self.assertTrue(super_user.is_staff)\n self.assertTrue(super_user.is_superuser)\n\n def test_create_address(self):\n address = AddressFactory()\n self.assertTrue(Address.objects.filter(id=address.id).exists())\n" }, { "alpha_fraction": 0.7196850180625916, "alphanum_fraction": 0.7196850180625916, "avg_line_length": 
27.863636016845703, "blob_id": "cdad818941b8fd31cf7551dfe5c8bc6552d1cb51", "content_id": "514737be2dd561bda9d3b2ecc9e0756efcd81c46", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 635, "license_type": "no_license", "max_line_length": 72, "num_lines": 22, "path": "/main/services.py", "repo_name": "Zercos/SubsMan", "src_encoding": "UTF-8", "text": "import datetime as ddt\nimport logging\n\nfrom django.db.models import DateField\nfrom django.db.models.functions import Cast\n\nfrom main.models import Subscription\n\nlog = logging.getLogger(__name__)\n\ndef disactivate_expired_subscriptions() -> int:\n    '''Select expired subscriptions, deactivate them and return the count.'''\n\n    disactivated_subs = Subscription.objects.active().annotate(\n        expired=Cast('term_end', output_field=DateField())\n    ).filter(\n        expired__lt=ddt.date.today()\n    ).update(\n        active=False\n    )\n    log.info('Disactivated %s expired subscriptions', disactivated_subs)\n    return disactivated_subs\n" }, { "alpha_fraction": 0.6782786846160889, "alphanum_fraction": 0.6803278923034668, "avg_line_length": 29.5, "blob_id": "46d2f13a63fdae681bfdc46eb525dde867b1015c", "content_id": "9741aa53aecb93d281ec6e7690c23e942a932aa4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 488, "license_type": "no_license", "max_line_length": 72, "num_lines": 16, "path": "/main/utils.py", "repo_name": "Zercos/SubsMan", "src_encoding": "UTF-8", "text": "from typing import Sequence\n\n\ndef chunk_generator(lst: Sequence, chunk_size: int) -> Sequence:\n    \"\"\"Generator which yields chunks of a sequence, each of the specified size\n\n    :param lst: A sequence to divide into chunks\n    :type lst: Sequence\n    :param chunk_size: The size of one chunk\n    :type chunk_size: int\n    :return: A chunk of the sequence with at most chunk_size elements\n    :rtype: Sequence\n    \"\"\"\n\n    for i in range(0, len(lst), chunk_size):\n        yield lst[i:i + chunk_size]\n" }, { "alpha_fraction": 0.5865384340286255, "alphanum_fraction": 0.6173076629638672, "avg_line_length": 19, "blob_id": "31cf5444d087a44b313af873bd68847f3cf72928", "content_id": "82329f1d9fffc2abfc7dc21abfd2a72b6f2e16ac", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "TOML", "length_bytes": 520, "license_type": "no_license", "max_line_length": 97, "num_lines": 26, "path": "/Pipfile", "repo_name": "Zercos/SubsMan", "src_encoding": "UTF-8", "text": "[[source]]\nname = \"pypi\"\nurl = \"https://pypi.org/simple\"\nverify_ssl = true\n\n[dev-packages]\nfactory-boy = \"*\"\nisort = \"*\"\n\n[packages]\ndjango = \"==3.1.13\"\npsycopg2 = \"==2.8.5\"\ndjango-extensions = \"==3.0.2\"\npydotplus = \"*\"\nwerkzeug = \"*\"\npre-commit = \"*\"\ndjango-registration = \"==3.1.2\"\npillow = \"*\"\nipython = \"*\"\ndjango-environ = \"*\"\ncelery = {extras = [\"redis\"],version = \"*\"}\nvine = \"*\"\ndjango-celery-results = {file = \"https://github.com/celery/django-celery-results/zipball/master\"}\n\n[requires]\npython_version = \"3.8\"\n" }, { "alpha_fraction": 0.659991979598999, "alphanum_fraction": 0.6635963320732117, "avg_line_length": 35.72058868408203, "blob_id": "00fc5b671aa9bebc91109607290e516cea9989ee", "content_id": "533da6ffcb79f96cf6e7c4a634d49c4a9967b44b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2497, "license_type": "no_license", "max_line_length": 109, "num_lines": 68, "path": "/user/forms.py", "repo_name": "Zercos/SubsMan", 
"src_encoding": "UTF-8", "text": "import logging\n\nfrom django import forms\nfrom django.contrib.auth import authenticate\nfrom django.contrib.auth.forms import UsernameField, UserCreationForm as DjangoRegistrationForm\nfrom django_registration import validators\n\nfrom user import models\nfrom user.tasks import send_welcome_email_to_user\n\nlogger = logging.getLogger(__name__)\n\n\nclass RegistrationForm(DjangoRegistrationForm):\n first_name = forms.CharField(label='First name', required=True, max_length=60)\n last_name = forms.CharField(label='Last name', required=True, max_length=60)\n\n class Meta(DjangoRegistrationForm.Meta):\n model = models.User\n fields = ('email', 'first_name', 'last_name')\n field_classes = {'email': UsernameField}\n\n error_css_class = 'error'\n required_css_class = 'required'\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.fields['email'].validators.extend(\n (validators.HTML5EmailValidator(), validators.validate_confusables_email,\n validators.CaseInsensitiveUnique(models.User, 'email', validators.DUPLICATE_EMAIL))\n )\n self.fields['email'].required = True\n\n def send_welcome_email(self):\n email = self.cleaned_data.get('email')\n send_task = send_welcome_email_to_user.delay(email)\n logger.info(f'Send a welcome task to {email}, task: {send_task.id}')\n\n\nclass AuthenticationForm(forms.Form):\n email = forms.EmailField(label='Email')\n password = forms.CharField(strip=False, widget=forms.PasswordInput)\n\n def __init__(self, request=None, *args, **kwargs):\n self.request = request\n self.user = None\n super().__init__(*args, **kwargs)\n\n def clean(self):\n email = self.cleaned_data.get('email')\n password = self.cleaned_data.get('password')\n\n if email is not None and password:\n self.user = authenticate(request=self.request, email=email, password=password)\n if self.user is None:\n raise forms.ValidationError('Invalid email or password.')\n logger.info(f'Authenticate successfully {email}')\n return self.cleaned_data\n\n def get_user(self):\n return self.user\n\n\nclass AddressForm(forms.ModelForm):\n class Meta:\n model = models.Address\n fields = ['user', 'address1', 'address2', 'city', 'postcode', 'country', 'phone', 'billing_address1',\n 'billing_address2', 'billing_city', 'billing_country', 'billing_postcode']\n" }, { "alpha_fraction": 0.6728761196136475, "alphanum_fraction": 0.6813434958457947, "avg_line_length": 45.6184196472168, "blob_id": "4b4ddedc46cff6348e3705ad284ff95076673f1b", "content_id": "6ed6a82562ec1ae3b2ea16e177c989c80eb5376f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3543, "license_type": "no_license", "max_line_length": 101, "num_lines": 76, "path": "/user/models.py", "repo_name": "Zercos/SubsMan", "src_encoding": "UTF-8", "text": "import logging\nfrom typing import List\n\nfrom django.contrib.auth.models import AbstractUser, BaseUserManager\nfrom django.db import models\nfrom django.utils.translation import gettext_lazy as _\n\nlogger = logging.getLogger(__name__)\n\n\nclass UserManager(BaseUserManager):\n use_in_migrations = True\n\n def _create_user(self, email, password, **extra_fields):\n if not email:\n raise ValueError('The given email must be set')\n email = self.normalize_email(email)\n user = self.model(email=email, **extra_fields)\n user.set_password(password)\n user.save(using=self._db)\n return user\n\n def create_user(self, email, password=None, **extra_fields):\n extra_fields.setdefault('is_staff', False)\n 
extra_fields.setdefault('is_superuser', False)\n return self._create_user(email=email, password=password, **extra_fields)\n\n def create_superuser(self, email, password=None, **extra_fields):\n extra_fields.setdefault('is_staff', True)\n extra_fields.setdefault('is_superuser', True)\n if extra_fields.get('is_staff') is not True:\n raise ValueError('Super user must have is_staff=True')\n if extra_fields.get('is_superuser') is not True:\n raise ValueError('Super user must have is_superuser=True')\n return self._create_user(email=email, password=password, **extra_fields)\n\n def get_or_none(self, *args, **kwargs):\n try:\n return self.model.objects.get(*args, **kwargs)\n except self.model.DoesNotExist:\n return None\n\n\nclass User(AbstractUser):\n username = None\n email = models.EmailField('email address', unique=True, null=False, blank=False, db_index=True,\n error_messages={'unique': _('A user with this email already exists.')})\n date_updated = models.DateTimeField(_('date updated'), auto_now=True)\n\n USERNAME_FIELD = 'email'\n REQUIRED_FIELDS: List[str] = []\n objects = UserManager()\n\n def _show(self):\n return {key: value for key, value in self.__dict__.items() if not key.startswith('_')}\n\n\nclass Address(models.Model):\n user = models.ForeignKey(User, on_delete=models.CASCADE, related_name='addresses', db_index=True)\n address1 = models.CharField('Address', max_length=120, null=False, blank=False)\n address2 = models.CharField('Address', max_length=120, null=True, blank=True)\n city = models.CharField('City', max_length=60, null=False, blank=False)\n postcode = models.CharField('Postcode', max_length=10, null=False, blank=False)\n country = models.CharField('Country', max_length=60, null=False, blank=False)\n phone = models.CharField('Phone number', max_length=30, null=True, blank=True)\n is_billing = models.BooleanField(default=True, null=False, blank=False)\n billing_address1 = models.CharField('Billing address', max_length=120, null=True, blank=True)\n billing_address2 = models.CharField('Billing address', max_length=120, null=True, blank=True)\n billing_city = models.CharField('Billing city', max_length=60, null=True, blank=True)\n billing_postcode = models.CharField('Billing postcode', max_length=10, null=True, blank=True)\n billing_country = models.CharField('Billing country', max_length=60, null=True, blank=True)\n date_updated = models.DateTimeField(auto_now=True)\n date_created = models.DateTimeField(auto_now_add=True, null=False)\n\n def _show(self):\n return {key: value for key, value in self.__dict__.items() if not key.startswith('_')}\n" } ]
32
bhartl/optimal-control
https://github.com/bhartl/optimal-control
dee1ad49d57df3946b3119fb1e58d80e68310a05
f0494b0aa388e854d941113a203a128890ebb643
dbdf7664f7a3f1cb246b265cdb56ceea23930c4c
refs/heads/master
2018-01-08T12:55:11.492228
2016-04-07T13:59:01
2016-04-07T13:59:01
44,093,422
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6185205578804016, "alphanum_fraction": 0.6494147777557373, "avg_line_length": 38.727848052978516, "blob_id": "f77650253c1b6af584784babcb0b8b3bd3d5ef14", "content_id": "60fbfe31783454d02f5398bd49b81942c53795d9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 12559, "license_type": "no_license", "max_line_length": 154, "num_lines": 316, "path": "/python/SuperpositionPulsePlot.py~", "repo_name": "bhartl/optimal-control", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python2.7\n#-*- coding:utf-8 -*-\nimport argh\nimport os, re, sys\nimport ConfigParser as cp\nimport scipy as sp\nimport matplotlib.pyplot as plt\nfrom subprocess import Popen, PIPE, call\n\nimport IOHelper\n\n\n##########################################################################################\n### main routine #########################################################################\ndef superimpose (cavityD,cavityU,par1,par2):\n if (par2 == 0) :\n return (sp.cos(par1*sp.pi)*cavityD[:] + 1j*sp.sin(par1*sp.pi)*cavityU[:])*sp.exp(-1j*par1*sp.pi)\n elif (par2 == 1) :\n return sp.exp(-1j*par1*sp.pi)*cavityD\n else :\n return cavityD\n\ndef pltFunction (cavity1,cavity2,cavity3,plotType):\n if (plotType==0):\n return sp.absolute(cavity1[:])**2,sp.absolute(cavity2[:])**2,sp.absolute(cavity3[:])**2\n elif (plotType==1):\n return sp.real(cavity1[:]),sp.real(cavity2[:]),sp.real(cavity3[:])\n elif (plotType==2):\n return sp.imag(cavity1[:]),sp.imag(cavity2[:]),sp.imag(cavity3[:])\n else: \n return cavity1, cavity2, cavity3\n\n### check for arguments, g: generate data, r: read data, in both ways: generate matrices ###\ndef main_routine (wd=\"./\",cfg=\"./python/parameter.cfg\",start=-1,cut1=1000.0,cut2=1000.0,cut3=1000.0,stop=-1,test=0,plotType=0,impose=0,par1=0.0,par2=0.0):\n\n ### read config file ###\n print (\"load from config file: \" + cfg)\n\n configParser = cp.ConfigParser()\n configParser.read(cfg)\n print (configParser.sections())\n cfg=configParser.__dict__['_sections'].copy() \n\n #for src, target in cfg['NVSETUP'].items():\n # print(src + \" : \" + target)\n omega_c = float(cfg['NVSETUP']['{omega_c}'])\n\n nWrite=int(cfg['OCFourier']['{write_harmonic}'])\n nRead =int(cfg['OCFourier']['{read_harmonic}'])\n nStore=int(cfg['MEFourier']['{storage_harmonic}'])\n\n nDown =nRead+nWrite\n nUp =nDown+nWrite\n ### read config file ###\n\n ### read data ### \n cavityWrite,cavityMemo,cavityRead =IOHelper.harmonics_readwrite(**cfg)\n time =IOHelper.functionaltimes_readwrite(**cfg)\n\n time['write'][:] *= 1e9\n time['read'][:] *= 1e9\n ti = int(time['idx_ti'])\n tf = int(time['idx_tf'])\n functime = time['read'][ti:tf]\n\n filename =IOHelper.getVectorOverlap(**cfg)\n reGamma,imGamma=sp.loadtxt(filename).T \n alphaR =reGamma[0:nRead] -1j*imGamma[0:nRead]\n alphaD =reGamma[nRead:nDown]-1j*imGamma[nRead:nDown]\n alphaU =reGamma[nDown:nUp] -1j*imGamma[nDown:nUp]\n ### read data ###\n\n ### plotting\n Reg1Down = sp.dot(alphaD.conj(),cavityWrite)\n Reg2Down = sp.dot(alphaD.conj(),cavityMemo)\n Reg2DownRead = Reg2Down + sp.dot(alphaR.conj(),cavityRead)\n\n Reg1Up = sp.dot(alphaU.conj(),cavityWrite)\n Reg2Up = sp.dot(alphaU.conj(),cavityMemo)\n Reg2UpRead = Reg2Up + sp.dot(alphaR.conj(),cavityRead)\n\n Reg1Super = superimpose(Reg1Down,Reg1Up,par1,par2)\n Reg2Super = superimpose(Reg2Down,Reg2Up,par1,par2)\n Reg2SuperRead = Reg2Super + sp.dot(alphaR.conj(),cavityRead)\n\n Reg2Read = sp.dot(alphaR.conj(),cavityRead)\n\n minT = min(functime) # 
min(time['read'])\n maxT = max(functime) # min(time['read'])\n\n minYComplete = -0.6\n maxYComplete = 0.6\n\n minYInfo = -0.6\n maxYInfo = 0.4\n\n minYRead = -0.4\n maxYRead = 0.6\n\n fs = 25\n\n plt.subplot(5,3,1)\n plt.title(\"state '0'\",fontsize=fs)\n plt.plot(time['read'],Reg2DownRead.real,color=\"blue\",linewidth=2)\n plt.plot(time['read'],Reg2DownRead.imag,color=\"lightblue\",linewidth=2)\n plt.ylabel(\"$A(t)$\",fontsize=fs)\n plt.ylim([minYComplete,maxYComplete])\n plt.xlim([minT,maxT])\n\n plt.subplot(5,3,2)\n plt.title(\"state '1'\",fontsize=fs)\n plt.plot(time['read'],Reg2UpRead.real,color=\"red\",linewidth=2)\n plt.plot(time['read'],Reg2UpRead.imag,color=\"orange\",linewidth=2)\n plt.ylim([minYComplete,maxYComplete])\n plt.xlim([minT,maxT])\n\n plt.subplot(5,3,3)\n plt.title(\"phaseshift $\\phi_0=${:}$\\pi$\".format(par1),fontsize=fs)\n plt.plot(time['read'],Reg2SuperRead.real,color=\"green\",linewidth=2)\n plt.plot(time['read'],Reg2SuperRead.imag,color=\"lightgreen\",linewidth=2)\n plt.ylim([minYComplete,maxYComplete])\n plt.xlim([minT,maxT])\n\n plt.subplot(5,3,4)\n plt.plot(time['read'],sp.absolute(Reg2DownRead)**2,color=\"blue\",linewidth=2)\n plt.ylabel(\"$|A(t)|^2$\",fontsize=fs)\n plt.ylim([0,(max(sp.absolute(minYComplete),sp.absolute(maxYComplete))**2)])\n plt.xlim([minT,maxT])\n\n plt.subplot(5,3,5)\n plt.plot(time['read'],sp.absolute(Reg2UpRead)**2,color=\"red\",linewidth=2)\n plt.ylim([0,(max(sp.absolute(minYComplete),sp.absolute(maxYComplete))**2)])\n plt.xlim([minT,maxT])\n\n plt.subplot(5,3,6)\n plt.plot(time['read'],sp.absolute(Reg2SuperRead)**2,color=\"green\",linewidth=2)\n plt.ylim([0,(max(sp.absolute(minYComplete),sp.absolute(maxYComplete))**2)])\n plt.xlim([minT,maxT])\n\n\n plt.subplot(5,3,7)\n plt.plot(time['read'],Reg2Down.real,color=\"blue\",linewidth=2)\n plt.plot(time['read'],Reg2Down.imag,color=\"lightblue\",linewidth=2)\n plt.ylabel(\"$\\\\tilde{A}(t)$\",fontsize=fs)\n plt.ylim([minYInfo,maxYInfo])\n plt.xlim([minT,maxT])\n\n plt.subplot(5,3,8)\n plt.plot(time['read'],Reg2Up.real,color=\"red\",linewidth=2)\n plt.plot(time['read'],Reg2Up.imag,color=\"orange\",linewidth=2)\n plt.ylim([minYInfo,maxYInfo])\n plt.xlim([minT,maxT])\n\n plt.subplot(5,3,9)\n plt.plot(time['read'],Reg2Super.real,color=\"green\",linewidth=2)\n plt.plot(time['read'],Reg2Super.imag,color=\"lightgreen\",linewidth=2)\n plt.ylim([minYInfo,maxYInfo])\n plt.xlim([minT,maxT])\n\n plt.subplot(5,3,10)\n plt.plot(time['read'],sp.absolute(Reg2Down)**2,color=\"blue\",linewidth=2)\n plt.ylabel(\"$|\\\\tilde{A}(t)|^2$\",fontsize=fs)\n plt.ylim([0,(max(sp.absolute(minYInfo),sp.absolute(maxYInfo)))])\n plt.xlim([minT,maxT])\n\n plt.subplot(5,3,11)\n plt.plot(time['read'],sp.absolute(Reg2Up)**2,color=\"red\",linewidth=2)\n plt.ylim([0,(max(sp.absolute(minYInfo),sp.absolute(maxYInfo)))])\n plt.xlim([minT,maxT])\n\n plt.subplot(5,3,12)\n plt.plot(time['read'],sp.absolute(Reg2Super)**2,color=\"green\",linewidth=2)\n plt.ylim([0,(max(sp.absolute(minYInfo),sp.absolute(maxYInfo)))])\n plt.xlim([minT,maxT])\n\n plt.subplot(5,3,13)\n plt.plot(time['read'],Reg2Read.real,linewidth=2,color=\"black\")\n plt.plot(time['read'],Reg2Read.imag,linewidth=2,color=\"darkgray\")\n plt.ylim([minYRead,maxYRead])\n plt.xlim([minT,maxT])\n plt.ylabel(\"$\\\\tilde{A}_R(t)$\",fontsize=fs)\n plt.xlabel(\"$t$ in ns\",fontsize=fs)\n\n plt.subplot(5,3,14)\n plt.plot(time['read'],Reg2Read.real,linewidth=2,color=\"black\")\n plt.plot(time['read'],Reg2Read.imag,linewidth=2,color=\"darkgray\")\n plt.ylim([minYRead,maxYRead])\n 
plt.xlim([minT,maxT])\n plt.xlabel(\"$t$ in ns\",fontsize=fs)\n\n plt.subplot(5,3,15)\n plt.plot(time['read'],Reg2Read.real,linewidth=2,color=\"black\")\n plt.plot(time['read'],Reg2Read.imag,linewidth=2,color=\"darkgray\")\n plt.ylim([minYRead,maxYRead])\n plt.xlim([minT,maxT])\n plt.xlabel(\"$t$ in ns\",fontsize=fs)\n\n# plt.subplot(4,3,5)\n# plt.plot(functime,ReadUpReg2[ti:tf].imag,color=\"darkgray\",linewidth=2)\n# plt.plot(functime,cavityUpReg3[ti:tf].imag,color=\"red\",linewidth=2)\n# plt.xlim([min(functime),max(functime)])\n\n# plt.subplot(4,3,3)\n# if (impose != 1):\n# plt.title(\"$\\\\alpha_{S}$\")\n# plt.bar(sp.arange(1,nStore+1,1),alphaS.real,color=\"green\")\n# else :\n# plt.title(\"superposition, $A(t)$\")\n# plt.plot(functime,ReadSuperReg2[ti:tf].real,color=\"darkgray\",linewidth=2)\n# plt.plot(functime,cavitySuperReg3[ti:tf].real,color=\"green\",linewidth=2)\n# plt.xlim([min(functime),max(functime)])\n\n# plt.subplot(4,3,6)\n# if (impose != 1):\n# plt.bar(sp.arange(1,nStore+1,1),alphaS.imag,color=\"green\")\n# else :\n# plt.plot(functime,ReadSuperReg2[ti:tf].imag,color=\"darkgray\",linewidth=2)\n# plt.plot(functime,cavitySuperReg3[ti:tf].imag,color=\"green\",linewidth=2)\n# plt.xlim([min(functime),max(functime)])\n\n\n# plt.subplot2grid((4,3),(2,0),colspan=3,rowspan=2)\n\n# cavityCheckReg1,cavityCheckReg2,cavityCheckReg3 = pltFunction(cavityCheckReg1,cavityCheckReg2,cavityCheckReg3,plotType)\n# cavitySuperReg1,cavitySuperReg2,cavitySuperReg3 = pltFunction(cavitySuperReg1,cavitySuperReg2,cavitySuperReg3,plotType)\n# cavityDownReg1, cavityDownReg2, cavityDownReg3 = pltFunction(cavityDownReg1, cavityDownReg2, cavityDownReg3, plotType)\n# cavityUpReg1, cavityUpReg2, cavityUpReg3 = pltFunction(cavityUpReg1, cavityUpReg2, cavityUpReg3, plotType)\n\n# if (test==1):\n# print (\"### compile fortran routines\")\n# cmd = \"./scripts/ifort-memoryHarmonics.sh \" + wd\n# call(cmd.split())\n\n# print (\"### call memoryOptimized\")\n# cmd=wd+\"memoryOptimized\"\n# call(cmd.split())\n\n# print (\"### call memorySuperimposed\")\n# cmd=wd+\"memorySuperimposed\"\n# generateSuperposition = Popen(cmd.split(), stdin=PIPE) # run fortran program with piped standard input\n# cmd = \"echo {:}\".format(par1) # communication with fortran-routine: chose superposition parameter\n# generateInput = Popen(cmd.split(), stdout=generateSuperposition.stdin) # send action to fortran program\n# output = generateSuperposition.communicate()[0]\n# generateInput.wait()\n\n# filename = cfg['FILES']['{prefix}']+cfg['FILES']['{name_readwrite}']+\\\n# cfg['FILES']['{name_storage}']+cfg['FILES']['{name_optimized}']\n\n# mytimeU,__,A_Re_U,A_Im_U = sp.loadtxt(filename+\"cavity_up_stored\" +cfg['FILES']['{postfix}']).T\n# mytimeD,__,A_Re_D,A_Im_D = sp.loadtxt(filename+\"cavity_down_stored\" +cfg['FILES']['{postfix}']).T\n# mytimeS,__,A_Re_S,A_Im_S = sp.loadtxt(filename+\"cavity_super_stored\"+cfg['FILES']['{postfix}']).T\n\n# cavityMode_U,cavityMode_D,cavityMode_S = pltFunction(A_Re_U+1j*A_Im_U, A_Re_D+1j*A_Im_D, A_Re_S+1j*A_Im_S,plotType)\n\n# cavityMaxDown = max(max(cavityDownReg1),max(cavityDownReg2),max(cavityDownReg3))\n# cavityMaxUp = max(max(cavityUpReg1),max(cavityUpReg2),max(cavityUpReg3))\n# cavityMaxSuper= max(max(cavitySuperReg1),max(cavitySuperReg2),max(cavitySuperReg3))\n# cavityMax = max(cavityMaxDown,cavityMaxUp,cavityMaxSuper)\n\n# cavityMinDown = min(min(cavityDownReg1),min(cavityDownReg2),min(cavityDownReg3))\n# cavityMinUp = min(min(cavityUpReg1),min(cavityUpReg2),min(cavityUpReg3))\n# cavityMinSuper= 
max(min(cavitySuperReg1),min(cavitySuperReg2),min(cavitySuperReg3))\n# cavityMin = min(cavityMinDown,cavityMinUp,cavityMinSuper)\n\n# plt.plot(time['write'],cavityDownReg1,color=\"blue\",linewidth=2)\n# plt.plot(time['store'],cavityDownReg2,color=\"blue\",linewidth=2)\n# plt.plot(time['read'] ,cavityDownReg3,color=\"blue\",linewidth=2,label=\"state '0'\")\n\n# plt.plot(time['write'],cavityUpReg1,color=\"red\",linewidth=2)\n# plt.plot(time['store'],cavityUpReg2,color=\"red\",linewidth=2)\n# plt.plot(time['read'] ,cavityUpReg3,color=\"red\",linewidth=2,label=\"state '1'\")\n\n# if (impose == 1):\n# plt.plot(time['write'],cavitySuperReg1,color=\"green\",linewidth=2)\n# plt.plot(time['store'],cavitySuperReg2,color=\"green\",linewidth=2)\n# plt.plot(time['read'] ,cavitySuperReg3,color=\"green\",linewidth=2,label=\"super state\")\n\n# if (test == 1):\n# plt.plot(mytimeD,cavityMode_D,label=\"state '0' test\",color=\"cyan\")\n# plt.plot(mytimeU,cavityMode_U,label=\"state '1' test\",color=\"magenta\")\n# if (impose == 1):\n# plt.plot(time['write'],cavityCheckReg1,color=\"orange\")\n# plt.plot(time['store'],cavityCheckReg2,color=\"orange\")\n# plt.plot(time['read'] ,cavityCheckReg3,color=\"orange\",label=\"super state\")\n# plt.plot(mytimeS,cavityMode_S,label=\"super state test\",color=\"brown\")\n\n## plt.legend()\n# plt.xlabel('time in ns')\n# plt.ylabel('$|A(t)|^2$')\n\n# \n# if start != -1 and stop != -1:\n# plt.xlim([start,stop])\n# else:\n# plt.xlim([min(time['write']),max(time['read'])])\n\n# plt.ylim([cavityMin*1.1,cavityMax*1.1])\n\n# plt.fill_between(functime, cavityMax*1.1, cavityMin*1.1, color='lightgray', facecolor='lightgray', alpha=0.5)\n# plt.plot([time['store'][0],time['store'][0]], [cavityMin*1.1,cavityMax*1.1], 'k--')\n# plt.plot([time['read'] [0],time['read'] [0]], [cavityMin*1.1,cavityMax*1.1], 'k--')\n\n# plt.plot([cut1,cut1],[0,cavityMax*1.1],linewidth=2.0)\n# plt.plot([cut2,cut2],[0,cavityMax*1.1],linewidth=2.0)\n# plt.plot([cut3,cut3],[0,cavityMax*1.1],linewidth=2.0)\n\n plt.show()\n ### plotting\n\n\n### main routine #########################################################################\n##########################################################################################\n\n\nif __name__ == '__main__':\n argh.dispatch_command(main_routine) \n\n\n\n" }, { "alpha_fraction": 0.5889220237731934, "alphanum_fraction": 0.6133251190185547, "avg_line_length": 39.09859085083008, "blob_id": "c2e059f0d4774bddc73f6a8f83e4d489fa726455", "content_id": "f9e7c8696771c8d4948bda7c80b746c7309a78f5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 11392, "license_type": "no_license", "max_line_length": 140, "num_lines": 284, "path": "/python/SmallestOverlapVariation.py~", "repo_name": "bhartl/optimal-control", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python2.7\n#-*- coding:utf-8 -*-\n\nimport ConfigParser as cp\nfrom subprocess import Popen, PIPE\nimport multiprocessing\nimport scipy as sp\nimport os\nimport shutil\nimport glob\n\nfrom mpl_toolkits.mplot3d import axes3d\nfrom matplotlib import cm\nfrom matplotlib.ticker import LinearLocator, FormatStrFormatter\nimport matplotlib.pyplot as plt\n#import matplotlib.pylab as plt\n#from matplotlib import rc\n\nimport argh\nimport IOHelper\n\ndef run_single_job(writeBase=None,readBase=None,datPath=\"dat/\",prefix=\"pa\",destPath=\"./parallel/\",generationType=\"r\",funcCfg=0):\n \"\"\" \"\"\"\n #define new working directories + dependencies\n\n 
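# reviewer note (inferred from the code below): each (writeBase, readBase) job\n # gets its own directory from dest(), with a private parameter.cfg and private\n # copies of the src* trees, so parallel workers do not interfere; the actual\n # optimization is delegated to SmallestOverlapEvaluate.py at the end.\n 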
configPath=\"./python/parameter.cfg\"\n print (\"load from config file: \" + configPath)\n configParser = cp.ConfigParser()\n configParser.read(configPath)\n cfg=configParser.__dict__['_sections'].copy() \n\n newDir=dest(destPath,writeBase,readBase)\n# newDir=destPath+\"interval_{:07.3f}w_{:07.3f}r_base/\".format(writeBase,readBase)\n\n print (\"create folder dependencies:\" + newDir)\n if not os.path.exists(newDir):\n os.makedirs(newDir)\n if not os.path.exists(newDir+\"dat\"):\n os.makedirs(newDir+\"dat\")\n if not os.path.exists(newDir+\"var\"):\n os.makedirs(newDir+\"var\")\n\n for srcDir in glob.glob(\"./src*\"):\n if os.path.exists(newDir+srcDir): \n shutil.rmtree(newDir+srcDir) \n shutil.copytree(srcDir, newDir+srcDir)\n\n\n #define config file paramters\n if funcCfg == 0 :\n configParser.set('OCFourier','{write_base}',\"{:.3f}D0\".format(writeBase))\n configParser.set('OCFourier','{read_base}',\"{:.3f}D0\".format(readBase))\n configParser.set('OCTime','{t3_functional}',\"T2functional+Pi/writeBase\")\n configParser.set('OCTime','{t3_functional}',\"T2functional+Pi/readBase\")\n elif funcCfg == 1: \n configParser.set('OCFourier','{write_base}',\"{:.3f}D0\".format(writeBase))\n configParser.set('OCTime','{t3_functional}',\"T2functional+Pi/writeBase/10D0*{:.3f}D0\".format(readBase))\n else:\n configParser.set('OCFourier','{write_base}',\"{:.3f}D0\".format(writeBase))\n configParser.set('OCFourier','{read_base}',\"{:.3f}D0\".format(readBase))\n\n configParser.set('FILES','{prefix}',newDir+datPath+prefix)\n configParser.set('FILES','{prefix_var}',newDir+datPath+prefix)\n\n #write config file\n with open(newDir+\"parameter.cfg\", 'wb') as configfile:\n configParser.write(configfile)\n\n# cmd=newDir+\"test.sh \" + destPath +\" {:07.3f} {:07.3f} \".format(writeBase,readBase)\n# cmd=\"./python/evaluateMinimum.py \" + newDir + \" \" + newDir + \"parameter.cfg g\"\n cmd=\"./python/SmallestOverlapEvaluate.py -b=\" + newDir + \" -c=\" + newDir + \"parameter.cfg -g=\" + generationType\n print (cmd)\n log=newDir+\"evaluate.log\"\n optimize = Popen(cmd.split(),stdout=open(log, 'wb'))\n# optimize = Popen(cmd.split())\n optimize.wait()\n\n if (newDir != \"./\" or newDir != \"\"):\n for srcDir in glob.glob(newDir+\"src*\"):\n if os.path.exists(srcDir): \n shutil.rmtree(srcDir) \n\ndef dest(destPath,writeBase,readBase):\n return destPath+\"interval_{:07.3f}w_{:07.3f}r_base/\".format(writeBase,readBase)\n\n\ndef main_routine (writeBase=8.0,writeCnt=1,readBase=2.0,readCnt=1,datPath=\"dat/\",prefix=\"pa\",destPath=\"../parallel/\",gentype=\"c\",funcCfg=0):\n \"\"\"\n Parameters:\n -----------\n --writeBase: maximum value of writing time interval in multiples of Pi/{base_rabi}.\n (float)\n --wirteCnt: number of different writing intervals from 1.0 to writeBase times Pi/{base_rabi}\n (int)\n --readBase: maximum value of reading time interval in multiples of Pi/{base_rabi}.\n (float)\n --readCnt: number of different reading intervals from 1.0 to readBase times Pi/{base_rabi}\n (int)\n --destPath: directory where output files are generated (for every parameter setting of\n writeBase/writeCnt, readBase/readCnt a corresponding subdirectory is created)\n (string)\n --gentype : type of data generation -->\n g : generate data with modNv\n r : read data with modNv and \n c : collect data and produce 3d plot of reading area\n \"\"\"\n dWrite = sp.array(range(0,writeCnt))\n dRead = sp.array(range(0,readCnt))\n\n if (writeCnt == 1):\n write_base = sp.ones([readCnt])*writeBase\n else:\n write_base = 
(1.0+(writeBase-1.0)/float(writeCnt-1)*dWrite[:])\n\n if (readCnt == 1):\n read_base = sp.ones([writeCnt])*readBase\n else:\n read_base = (1.0+(readBase-1.0)/float(readCnt-1)*dRead[:])\n\n print read_base\n\n# print (write_base)\n# print (read_base)\n\n if (gentype==\"g\" or gentype==\"r\"): # generate\n pool = multiprocessing.Pool(processes=multiprocessing.cpu_count())\n\n results = [ pool.apply_async(run_single_job, args=(wb,rb,datPath,prefix,destPath,gentype,funcCfg))\n for wb,rb in zip(write_base, read_base) ]\n\n results = [ p.get() for p in results ]\n elif (gentype==\"c\"): # collect\n\n configPath=\"./python/parameter.cfg\"\n print (\"load from config file: \" + configPath)\n configParser = cp.ConfigParser()\n configParser.read(configPath)\n cfg=configParser.__dict__['_sections'].copy()\n \n nRead =int(cfg['OCFourier']['{read_harmonic}'])\n nWrite=int(cfg['OCFourier']['{write_harmonic}'])\n\n timeCntWrite = int(cfg['OCTime']['{write_timecnt}'])\n timeCntRead = int(cfg['OCTime']['{read_timecnt}'])\n timeCnt = timeCntRead+timeCntWrite\n\n omega_c=float(cfg['NVSETUP']['{omega_c}'])*2.0*sp.pi\n omega_r=float(cfg['OCFourier']['{base_rabi}'])/1e6\n tUnit =sp.pi/(2.0*sp.pi*1e3*omega_r)\n\n filelist=[]\n div =10.0\n# readCnt = 17\n cavity=sp.zeros((2,readCnt,timeCntRead))\n time =sp.zeros(timeCnt)\n wb=write_base[-1] # choose last element\n errCnt=[]\n\n name_optimized=cfg['FILES']['{name_optimized}']\n name_readwrite=IOHelper.getNameReadWrite(**cfg) \n\n\n for rb in range(readCnt):\n newDir=dest(destPath,wb,read_base[rb])\n cfg['FILES']['{prefix}'] = newDir+datPath+prefix+name_readwrite+name_optimized\n\n try:\n filename=cfg['FILES']['{prefix}']+\"cavityMode_read_down\"+cfg['FILES']['{postfix}']\n print filename\n time,real,imag = sp.loadtxt(filename).T\n cavity[0,rb,:] = real[timeCntWrite:timeCntWrite+timeCntRead]**2+imag[timeCntWrite:timeCntWrite+timeCntRead]**2\n except:\n errCnt.append(\"down: {:8.4f}\\n\".format(read_base[rb]))\n\n try:\n filename=cfg['FILES']['{prefix}']+\"cavityMode_read_up\"+cfg['FILES']['{postfix}']\n time,real,imag = sp.loadtxt(filename).T\n cavity[1,rb,:] = real[timeCntWrite:timeCntWrite+timeCntRead]**2+imag[timeCntWrite:timeCntWrite+timeCntRead]**2\n except:\n errCnt.append(\"up: {:8.4f}\\n\".format(read_base[rb]))\n\n if (len(errCnt) != 0):\n print (\"can't read some files:\\n\" + str(errCnt))\n\n print (\"generating color map\")\n\n # read funtional times t2, t3\n newDir=dest(destPath,wb,read_base.max())\n cfg['FILES']['{prefix}']=newDir+datPath+prefix\n functime =IOHelper.functionaltimes_readwrite(**cfg)\n\n ti=float(functime['ti'])/omega_c/tUnit # in nano seconds\n ti_i = int(functime['idx_ti'])-1\n\n tf=float(functime['tf'])/omega_c/tUnit # in nano seconds\n tf_i = int(functime['idx_tf'])\n\n functimeX =sp.array([1.0,readBase])\n functimeY =sp.array([(tf+ti)/2.0,tf])\n cutShortX =sp.array([1.0,1.0])\n cutLargeX =sp.array([readBase,readBase])\n cutShortY =sp.array([writeBase,writeBase+readBase/2.0])\n cutLargeY =sp.array([writeBase,writeBase+readBase])\n functimeZ =sp.array([-1.5,-1.5])\n functimeYMid=sp.array([ti + (tf-ti)/4.0,ti+ (tf-ti)/2.0])\n # read funtional times t2, t3\n\n # define the grid over which the function should be plotted (xx and yy are matrices)\n xx, yy = sp.meshgrid(sp.linspace(1.0,readBase,readCnt),\n sp.linspace(ti,tf,tf_i-ti_i))\n zz0 = cavity[0,:,ti_i:tf_i].T/cavity[:,:,ti_i:tf_i].max()\n zz1 = cavity[1,:,ti_i:tf_i].T/cavity[:,:,ti_i:tf_i].max()\n\n# xx, yy = sp.meshgrid(sp.linspace(1.0,readBase,readCnt),\n# 
sp.linspace(time[timeCntRead/div]/tUnit,time[-1]/tUnit,timeCntRead/div))\n# zz1 = cavity[0,:,:].T/cavity[:,:,:].max()\n# zz1 = cavity[1,:,:].T/cavity[:,:,:].max()\n\n font = {\n 'fontsize' : 26,\n 'verticalalignment' : 'top',\n 'horizontalalignment' : 'center'\n }\n\n# cm('font', **font)\n# rc('text', usetex=True)\n\n\n fig = plt.figure()\n# ax = fig.gca(projection='3d')\n fig0 = fig.add_subplot(121, projection='3d')\n fig0.plot_surface(xx, yy, zz0, rstride=10, cstride=5, cmap=cm.Blues, alpha=0.5,zorder=11.0,vmin=-0.25, vmax=1)\n fig0.contourf(xx, yy, zz0, zdir='z', offset=-1.5, cmap=cm.Blues,vmin=-0.25, vmax=1)\n fig0.plot(functimeX, functimeY, functimeZ,'k--',zorder=10.0) \n fig0.plot(functimeX, functimeYMid, functimeZ,'k--',zorder=10.0) \n fig0.plot(cutShortX, cutShortY, functimeZ,'m-',linewidth=2.5,zorder=10.0) \n fig0.plot(cutLargeX, cutLargeY, functimeZ,'g-',linewidth=2.5,zorder=10.0) \n fig0.set_title(\"a) state \\\"0\\\"\", **font)\n fig0.set_xlabel(\"$\\Delta T_{\\cal F} \\, / \\, \\\\frac{T_R}{2}$\", **font)\n fig0.set_ylabel(\"$t \\, / \\, \\\\frac{T_R}{2}$\", **font)\n fig0.set_zlabel(\"$\\left|A(t)\\\\right|^2 \\, / \\, \\left|A_{max}(t)\\\\right|^2$\", **font)\n fig0.set_zlim(-1.5, 1.0)\n\n fig1 = fig.add_subplot(122, projection='3d')\n fig1.plot_surface(xx, yy, zz1, rstride=10, cstride=5, cmap=cm.Reds, alpha=0.5,zorder=11.0,vmin=-0.25, vmax=1)\n fig1.contourf(xx, yy, zz1, zdir='z', offset=-1.5, cmap=cm.Reds,vmin=-0.25, vmax=1)\n fig1.plot(functimeX, functimeY, functimeZ,'k--',zorder=10.0) \n fig1.plot(functimeX, functimeYMid, functimeZ,'k--',zorder=10.0) \n fig1.plot(cutShortX, cutShortY, functimeZ,'m-',linewidth=2.5,zorder=10.0) \n fig1.plot(cutLargeX, cutLargeY, functimeZ,'g-',linewidth=2.5,zorder=10.0) \n fig1.set_title(\"b) state \\\"1\\\"\", **font)\n fig1.set_xlabel(\"$\\Delta T_{\\cal F} \\, / \\, \\\\frac{T_R}{2}$\", **font)\n fig1.set_ylabel(\"$t \\, / \\, \\\\frac{T_R}{2}$\", **font)\n fig1.set_zlabel(\"$\\left|A(t)\\\\right|^2 \\, / \\, \\left|A_{max}(t)\\\\right|^2$\", **font)\n fig1.set_zlim(-1.5, 1.0)\n\n\n# plt.subplot(1, 2, 1)\n# plt.pcolor(xx,yy,zz0)\n# plt.axis([xx.min(),xx.max(),yy.min(),yy.max()])\n# plt.plot(functimeX,functimeY, 'w', linewidth=2)\n# plt.plot(functimeX,functimeYMid, 'w-', linewidth=2)\n# plt.title(\"time bin state \\\"0\\\"\")\n# plt.xlabel(\"$T_{functional} \\, / \\, \\\\frac{\\pi}{\\Omega_R[MHz]}$\", **font)\n# plt.ylabel(\"$t \\, / \\, \\\\frac{\\pi}{\\Omega_R[MHz]}$\", **font)\n\n# plt.subplot(1, 2, 2)\n# plt.pcolor(xx,yy,zz1)\n# plt.axis([xx.min(),xx.max(),ti,tf])\n# plt.plot(functimeX,functimeY, 'w', linewidth=2)\n# plt.plot(functimeX,functimeYMid, 'w-', linewidth=2)\n# plt.title(\"time bin sstate \\\"1\\\"\")\n# plt.xlabel(\"$T_{functional} \\, / \\, \\\\frac{\\pi}{\\Omega_R[MHz]}$\", **font)\n\n# plt.colorbar(label=\"$\\left|A_{0,/1}(t)\\\\right|^2 \\, / \\, max(\\left|A(t))\\\\right|^2$\")\n\n plt.show()\n\n else:\n print (\"option unkown: gentype=\"+gentype)\n \n \nif __name__ == '__main__':\n argh.dispatch_command(main_routine) \n\n\n" }, { "alpha_fraction": 0.5046687722206116, "alphanum_fraction": 0.5236592888832092, "avg_line_length": 40.26823043823242, "blob_id": "047607bca980908c183116523e8c3467671f3926", "content_id": "48eab09291fabe6fdb7a904aeaa845b90a84d100", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 15850, "license_type": "no_license", "max_line_length": 145, "num_lines": 384, "path": "/python/MemoryPulsePhasespace.py", "repo_name": 
"bhartl/optimal-control", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python2.7\n#-*- coding:utf-8 -*-\nimport argh\nimport os, re, sys\nimport ConfigParser as cp\nimport scipy as sp\nimport scipy.linalg as la\nimport matplotlib.pyplot as plt\nimport multiprocessing\n\nimport MemoryPulseFunctional\nfrom MemoryPulseFunctional import cons, fun, dim, conf\nimport IOHelper\nfrom IOHelper import replace_in_file\n\nfrom math import sqrt, sin, cos, pi\nfrom subprocess import Popen, PIPE, call\nfrom scipy import linspace, outer, ones, sin, cos, size\nfrom scipy.interpolate import interp2d\nfrom mpl_toolkits.mplot3d import Axes3D\n\n\n\n##########################################################################################\n##########################################################################################\n##########################################################################################\n\n\n##########################################################################################\ndef evaluateSphericalVariation (phi,theta,cntPhi,cntTheta):\n global conf,cons\n success = False\n\n alpha0 =sp.zeros([dim['alpha']],complex)\n alpha0[conf['id0']]=sp.cos(phi)*sp.sin(theta)+0j\n alpha0[conf['id1']]=sp.sin(phi)*sp.sin(theta)+0j\n alpha0[conf['id2']]=sp.cos(theta)+0j\n\n# if (sp.absolute(alpha0[conf['id0']]) <= 1e-10): \n# alpha0[conf['id0']]=0.0+0j\n# if (sp.absolute(alpha0[conf['id1']]) <= 1e-10): \n# alpha0[conf['id1']]=0.0+0j\n# if (sp.absolute(alpha0[conf['id2']]) <= 1e-10): \n# alpha0[conf['id2']]=0.0+0j\n# \n # normalize coefficients for alpha -> defines net-power\n alpha0[:]=alpha0[:]/sp.linalg.norm(alpha0[:])*cons['alpha_norm']\n __,res = MemoryPulseFunctional.evaluateFunctional(alpha0,1.0+0j)\n \n myRes = sp.zeros([conf['entries']+3])\n myRes[0] = alpha0[conf['id0']].real\n myRes[1] = alpha0[conf['id1']].real\n myRes[2] = alpha0[conf['id2']].real\n myRes[3:]= res\n \n print \"### spherical map: phi/pi={0:5.3f}, theta/pi={1:5.3f}, fun={2:f}\".format(phi/sp.pi,theta/sp.pi,myRes[conf['funval']])\n\n myRes[conf['funval']] = min(conf['cutoff'],res[conf['funval']])\n\n return myRes,cntPhi,cntTheta\n##########################################################################################\n\n\n##########################################################################################\n### main routine #########################################################################\ndef main_routine (wd=\"./\",cfg=\"./python/parameter.cfg\",generationType=\"p\",\\\n toMinimize =2,\\\n cavityMatch=1,\\\n silent=0,\\\n useBeta=0,\\\n cutoff=10000,dimGrid=11,id0=0,id1=1,id2=2,pltId='funval',pltLim=1000,pltBins=40,myCmap=\"brg\"):\n print \"#################################################################\"\n print \"#################################################################\"\n print \"### optimal control #############################################\"\n print \"### memory pulse evaluation #####################################\"\n print \"#################################################################\"\n print \"#################################################################\"\n\n ### globals for functional variation\n global cons, fun, dim, conf\n\n conf['toMinimize'] =toMinimize\n conf['cavityMatch']=cavityMatch\n conf['silent'] =silent\n if (useBeta == 0):\n conf['useBeta'] = False\n else:\n conf['useBeta'] = True\n\n conf['id0'] =id0\n conf['id1'] =id1\n conf['id2'] =id2\n conf['cutoff'] =cutoff\n\n ### globals for functional variation\n\n ### generate working environment 
###\n print (\"### working directory: \" + wd)\n tmpDir = wd+\"tmp/\" \n cmd = \"mkdir -p \" + tmpDir\n call(cmd.split())\n ### generate working environment ###\n\n ### read config file ###\n print (\"### load config file: \" + cfg)\n configParser = cp.ConfigParser()\n configParser.read(cfg)\n print (configParser.sections())\n cfg=configParser.__dict__['_sections'].copy() \n\n conf['MEConstraints'] = cfg['MEConstraints'].copy()\n conf['FITNESS']= cfg['FITNESS'].copy()\n conf['FITNESS']['{mutationrate}']=sp.zeros([conf['entries']])\n conf['FITNESS']['{mutationrate}'][conf['funval']] =cfg['FITNESS']['{mut_functional}']\n conf['FITNESS']['{mutationrate}'][conf['fidelity_down']]=cfg['FITNESS']['{mut_fidelity_down}']\n conf['FITNESS']['{mutationrate}'][conf['fidelity_up']] =cfg['FITNESS']['{mut_fidelity_up}']\n conf['FITNESS']['{mutationrate}'][conf['memolap']] =cfg['FITNESS']['{mut_memolap}']\n conf['FITNESS']['{mutationrate}'][conf['alpha']] =cfg['FITNESS']['{mut_alpha}']\n conf['FITNESS']['{mutationrate}'][conf['beta']] =cfg['FITNESS']['{mut_beta}']\n conf['FITNESS']['{mutationrate}'][conf['success']] =cfg['FITNESS']['{mut_success}']\n\n cons['alpha_norm']=float(cfg['MEConstraints'][\"{storage_amplitude}\"])\n cons['beta_low'] =float(cfg['MEConstraints'][\"{limit_low_beta}\"])\n cons['beta_top'] =float(cfg['MEConstraints'][\"{limit_top_beta}\"])\n cons['chi2_Tol'] =float(cfg['MEConstraints']['{tol_chi2}'])\n\n dim['alpha']=int(cfg['MEFourier']['{storage_harmonic}'])\n dim['total']=dim['alpha']+3\n\n prefix =cfg['FILES']['{prefix}']\n postfix =cfg['FILES']['{postfix}']\n name_optimized=cfg['FILES']['{name_optimized}']\n\n name_spin =cfg['FILES']['{name_spin}']\n name_cavity =cfg['FILES']['{name_cavity}']\n\n cfg['dim_grid'] =dimGrid\n name_readwrite=IOHelper.getNameReadWrite(**cfg) \n name_storage =IOHelper.getNameStorage (**cfg) \n name_varinit =IOHelper.getNameInitialVariation (**cfg)\n\n myTime =IOHelper.functionaltimes_readwrite(**cfg) # reads time and updates cfg: \n # cfg['METime']['{fidelity_ti}'] = myTime['idx_ti']\n # cfg['METime']['{fidelity_tf}'] = myTime['idx_tf']\n\n\n gridPhi =sp.linspace(0.0, 2.0*sp.pi, num=2*dimGrid-1)\n gridTheta =sp.linspace(0.0, sp.pi, num=dimGrid)\n minfun =sp.zeros([len(gridPhi),len(gridTheta),conf['entries']+3])\n\n if (generationType == \"p\"):\n print \"## read from file: \" + name_varinit\n raw = sp.loadtxt(name_varinit)\n minfun = raw.reshape(len(gridPhi),len(gridTheta),conf['entries']+3)\n for i in sp.arange(0,len(gridPhi),1):\n for j in sp.arange(0,len(gridTheta),1):\n minfun[i,j,conf['fitness']] = MemoryPulseFunctional.fitnessFunction(minfun[i,j,:])\n\n else:\n ### prepare and complie fortran routines ###\n print (\"### prepare fortran routines\")\n replace_in_file('./python/py.parNvCenter.F95' , tmpDir +'parNvCenter.F95' , **cfg['NVSETUP'])\n replace_in_file('./python/py.parMemoryPulse.F95', tmpDir +'parMemoryPulse.F95', **cfg['MEFourier'])\n replace_in_file(tmpDir +'parMemoryPulse.F95' , tmpDir +'parMemoryPulse.F95', **cfg['OCFourier'])\n replace_in_file(tmpDir +'parMemoryPulse.F95' , tmpDir +'parMemoryPulse.F95', **cfg['MESpin'])\n replace_in_file(tmpDir +'parMemoryPulse.F95' , tmpDir +'parMemoryPulse.F95', **cfg['MEConstraints'])\n replace_in_file(tmpDir +'parMemoryPulse.F95' , tmpDir +'parMemoryPulse.F95', **cfg['OCConstraints'])\n replace_in_file(tmpDir +'parMemoryPulse.F95' , tmpDir +'parMemoryPulse.F95', **cfg['METime'])\n replace_in_file(tmpDir +'parMemoryPulse.F95' , tmpDir +'parMemoryPulse.F95', **cfg['OCTime'])\n replace_in_file(tmpDir 
+'parMemoryPulse.F95' , tmpDir +'parMemoryPulse.F95', **cfg['FILES'])\n\n #write config file\n with open(prefix+\"parameter.cfg\", 'wb') as configfile:\n configParser.write(configfile)\n ### read config file ###\n\n print (\"### compile fortran routines\")\n cmd = \"mv \"+tmpDir+\"parMemoryPulse.F95 \"+wd+\"srcOptCntrl/parMemoryPulse.F95\"\n call(cmd.split())\n cmd = \"mv \"+tmpDir+\"parNvCenter.F95 \"+wd+\"srcNv/parNvCenter.F95\"\n call(cmd.split())\n\n cmd = \"./scripts/ifort-memoryHarmonics.sh \" + wd\n call(cmd.split())\n\n print (\"### invoke fortran routines\")\n print (\"### generation Type: \" + generationType)\n cmd = wd+\"memoryHarmonics\" # location of executable fortran program\n generateHarmonics = Popen(cmd.split(), stdin=PIPE) # run fortran program with piped standard input\n cmd = \"echo \" + generationType # communication with fortran-routine: choose action -> read or generate\n generateInput = Popen(cmd.split(), stdout=generateHarmonics.stdin) # send action to fortran program\n output = generateHarmonics.communicate()[0]\n generateInput.wait()\n ### prepare and complie fortran routines ###\n\n ### read data for functional variation ###\n cons['cavityT2_down'], \\\n cons['cavityT2_up'] = IOHelper.read_CavityMemory (**cfg['FILES'])\n\n fun ['mtrxBeta_up'], \\\n cons['vecT2_up'] , \\\n cons['fidelity_up'], \\\n cons['mtrxMemOlap'] = IOHelper.read_MtrxMemory(\"up\", **cfg['FILES']) \n\n fun ['mtrxBeta_down'], \\\n cons['vecT2_down'] , \\\n cons['fidelity_down'], \\\n __ = IOHelper.read_MtrxMemory(\"down\", **cfg['FILES']) \n ### read data for functional variation ###\n\n ### functional variation ###\n print (\"\\n### start minimization: \")\n print (\"### on initial \"+ str(sp.shape(minfun)[0])+\"x\"+str(sp.shape(minfun)[1]) + \"-grid (phi,theta) on sphere\")\n\n mincnt=0\n cntPhi =0\n t0=0\n tcnt=len(gridTheta)\n\n for phi in gridPhi[0:len(gridPhi)-1]: \n theta_i=0\n if(cntPhi > 0):\n t0=1\n tcnt=len(gridTheta)-1\n theta_i=1\n\n ### parallel evaluation with <cpu_count()> cores ###################################\n pool = multiprocessing.Pool(processes=multiprocessing.cpu_count())\n\n results = [ pool.apply_async(evaluateSphericalVariation, args=(phi,theta,cntPhi,cntTheta))\n for theta,cntTheta in zip(gridTheta[t0:tcnt],sp.arange(t0,tcnt,1)) ]\n\n for p in results:\n myResult,i,j =p.get()\n minfun[i,j,:]=myResult\n\n #kill multiprocessing pool !!!\n pool.terminate()\n ### parallel evaluation with <cpu_count()> cores ###################################\n\n\n ### sequential evaluation 1 core ###################################################\n# for theta in gridTheta[t0:tcnt]:\n# minfun[cntPhi,theta_i,:],__,__=MemoryPulseFunctional.evaluateSphericalVariation (phi,theta,cntPhi,theta_i)\n# theta_i+=1\n ### sequential evaluation 1 core ###################################################\n\n cntPhi=cntPhi+1\n # end for phi\n\n minfun[ :, 0,:]=minfun[0, 0,:] # theta=0 : same vector on unit-sphere\n minfun[ :,-1,:]=minfun[0,-1,:] # theta=pi : same vector on unit-sphere\n minfun[-1, :,:]=minfun[0, :,:] # periodic\n\n print \"\\n### write to file: \" + name_varinit\n sp.savetxt(name_varinit,minfun.reshape((len(gridPhi)*len(gridTheta),conf['entries']+3)),\\\n header=' alpha0['+str(id0)+']; alpha0['+str(id1)+']; alpha0['+str(id2)+'];'\\\n +' fitness; success; norm[alpha]; abs[beta]; memolap; fidelity_up; fidelity_down; minfun') \n\n print 
\"#################################################################\"\n\n\n font = {\n 'fontsize' : 26,\n }\n\n mytitle=MemoryPulseFunctional.getName(pltId)\n\n ### prepare surface_plot ###############################################################\n x = outer(cos(gridPhi), sin(gridTheta))\n y = outer(sin(gridPhi), sin(gridTheta))\n z = outer(ones(size(gridPhi)), cos(gridTheta))\n\n myDensity=sp.zeros([len(gridPhi),len(gridTheta)])\n for i in sp.arange(0,len(gridPhi),1):\n for j in sp.arange(0,len(gridTheta),1):\n if (minfun[i,j,conf[pltId]] >= pltLim):\n myDensity[i,j] = pltLim+1\n else:\n myDensity[i,j] = minfun[i,j,conf[pltId]]\n\n \n # \"afmhot\", \"hot\", \"terrain\", \"brg\"\n cm = plt.cm.get_cmap(myCmap) \n myColors = cm(myDensity/pltLim)\n ### prepare surface_plot ###############################################################\n\n ### prepare colormap ###################################################################\n fig0_colors=sp.linspace(0,pltLim,pltBins)\n ### prepare colormap ###################################################################\n\n ### prepare histogram ##################################################################\n yHist,xHist = sp.histogram(myDensity.flatten(),pltBins)\n# xSpan = pltxHist.max()-xHist.min()\n# Color = [cm(((ix-xHist.min())/xSpan)) for ix in xHist]\n\n Color = [cm(((ix)/pltLim)) for ix in xHist]\n ### prepare histogram ##################################################################\n\n ### calculate fittest solutions ########################################################\n cntFit = 0\n\n for i in sp.arange(0,len(gridPhi),1):\n for j in sp.arange(0,len(gridTheta),1):\n\n if minfun[i,j,conf['fitness']] != 0.0 :\n cntFit+=1\n \n if (minfun[i,j,conf[pltId]] >= pltLim):\n myDensity[i,j] = pltLim\n else:\n myDensity[i,j] = minfun[i,j,conf[pltId]]\n \n if (cntFit > 0):\n fitAlpha =sp.zeros([cntFit,int(cfg['MEFourier']['{storage_harmonic}'])]) \n\n cntFit = 0\n \n for i in sp.arange(0,len(gridPhi),1):\n for j in sp.arange(0,len(gridTheta),1):\n if minfun[i,j,conf['fitness']] != 0.0 :\n print minfun[i,j,3:]\n \n fitAlpha[cntFit,conf['id0']]=minfun[i,j,0]\n fitAlpha[cntFit,conf['id1']]=minfun[i,j,1]\n fitAlpha[cntFit,conf['id2']]=minfun[i,j,2]\n\n cntFit+=1\n\n name_fittest=IOHelper.getNameInitialFittest(**cfg)\n print \"\\n### write fittest to file: \" + name_fittest\n sp.savetxt(name_fittest,fitAlpha,\\\n header='# alpha0[0] ... 
alpha0[n]') \n\n print \"### fitness counter: {0:} of {1:}\".format(cntFit,sp.shape(minfun)[0]*sp.shape(minfun)[1])\n ### calculate fittest solutions ########################################################\n\n\n fig = plt.figure()\n\n\n ### plot surface_plot ##################################################################\n fig3D = fig.add_subplot(131, projection='3d')\n surf = fig3D.plot_surface(x, y, z, rstride=1, cstride=1,facecolors=myColors)\n fig3D.plot(cos(gridPhi), sin(gridPhi), zs=0, zdir='z',lw=0.5, color=\"black\")\n fig3D.plot(cos(gridPhi), sin(gridPhi), zs=0, zdir='x',lw=0.5, color=\"black\")\n fig3D.set_xlabel(\"$\\\\alpha_{:}$\".format(id0), **font)\n fig3D.set_ylabel(\"$\\\\alpha_{:}$\".format(id1), **font)\n fig3D.set_zlabel(\"$\\\\alpha_{:}$\".format(id2), **font)\n fig3D.set_title(\"a) 4d-plot of \"+mytitle, fontsize = 20)\n ### plot surface_plot ##################################################################\n\n\n ### plot colormap ######################################################################\n fig0 = fig.add_subplot(132)\n fig0.invert_yaxis()\n cp0=fig0.contourf(gridPhi/sp.pi,gridTheta/sp.pi,minfun[:,:,conf[pltId]].T,fig0_colors,cmap=cm)\n plt.colorbar(cp0)\n fig0.set_xlabel(\"$\\phi/\\pi$\", **font)\n fig0.set_ylabel(\"$\\\\theta/\\pi$\", **font)\n fig0.set_title(\"b) map of \"+mytitle, fontsize = 20)\n ### plot colormap ######################################################################\n\n\n ### plot histogram #####################################################################\n figBar=fig.add_subplot(133)\n figBar.bar(xHist[:-1],yHist,color=Color,width=xHist[1]-xHist[0])\n figBar.set_xlim([0,pltLim])\n figBar.set_ylim([0,max(yHist[0:len(yHist)-1])])\n figBar.set_xlabel(mytitle, fontsize=20)\n figBar.set_ylabel(\"count\", fontsize=20)\n figBar.set_title(\"c) histogram\", fontsize = 20)\n ### plot histogram #####################################################################\n\n plt.show()\n\n\n### main routine #########################################################################\n##########################################################################################\n\n\nif __name__ == '__main__':\n argh.dispatch_command(main_routine) \n\n" }, { "alpha_fraction": 0.6680850982666016, "alphanum_fraction": 0.7319148778915405, "avg_line_length": 45.599998474121094, "blob_id": "fab4c3f1d4e38526cde8addcff193dfa112c1d8c", "content_id": "ccae06a1448774c7deb16ed2f3079334de063221", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 235, "license_type": "no_license", "max_line_length": 198, "num_lines": 5, "path": "/scripts/ifort-spinAnalysis.sh", "repo_name": "bhartl/optimal-control", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\nrm -f \"$1\"SpinCheck\n\nifort -heap-arrays -Tf \"$1\"srcNv/modNvCenter.F95 -Tf \"$1\"srcOptCntrl/modSmallestOverlap.F95 -Tf \"$1\"srcOptCntrl/modMemoryPulse.F95 -Tf \"$1\"srcMain/main_NVCheck_2DSpins.F95 -free -o \"$1\"SpinAnalysis\n\n\n" }, { "alpha_fraction": 0.5498719215393066, "alphanum_fraction": 0.5897033214569092, "avg_line_length": 42.51079177856445, "blob_id": "b739db7637eefe5329f991016059debfe0e5d486", "content_id": "ccc0ba2fb19c07195565fb469d5970953800868d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 12101, "license_type": "no_license", "max_line_length": 146, "num_lines": 278, "path": "/python/SmallestOverlap-BlochPartialOverlap.py", "repo_name": "bhartl/optimal-control", "src_encoding": 
"UTF-8", "text": "#!/usr/bin/env python2.7\n#-*- coding:utf-8 -*-\nimport argh\nimport os, re, sys\nimport ConfigParser as cp\nimport scipy as sp\nfrom scipy.integrate import cumtrapz\nfrom math import acos,asin\n\nfrom mpl_toolkits.mplot3d import axes3d\nfrom matplotlib import cm\nfrom matplotlib.ticker import LinearLocator, FormatStrFormatter\nimport matplotlib.pyplot as plt\n\nfrom IOHelper import replace_in_file\nimport IOHelper\nfrom subprocess import Popen, PIPE, call\n\nfrom math import log10, floor\n\n\n##########################################################################################\n### main routine #########################################################################\n\ndef round_sig(x, sig=2):\n return round(x, sig-int(floor(log10(x)))-1)\n\n### check for arguments, g: generate data, r: read data, in both ways: generate matrices ###\ndef main_routine (wd=\"./\",cfg=\"./python/parameter.cfg\",thetaCnt=11,phiCnt=11,bloch=0,myMap=\"jet\",minOlap=0,fortranCheck=0,gridR=5,gridC=5):\n\n ### read config file ###\n print (\"load from config file: \" + cfg)\n cfg=IOHelper.loadCfg(wd,cfg)\n\n #for src, target in cfg['NVSETUP'].items():\n # print(src + \" : \" + target)\n omega_c = float(cfg['NVSETUP']['{omega_c}'])\n\n nWrite=int(cfg['OCFourier']['{write_harmonic}'])\n nRead =int(cfg['OCFourier']['{read_harmonic}'])\n nStore=int(cfg['MEFourier']['{storage_harmonic}'])\n\n nDown =nRead+nWrite\n nUp =nDown+nWrite\n ### read config file ###\n\n ### read data ### \n cavityWrite,cavityMemo,cavityRead =IOHelper.harmonics_readwrite(**cfg)\n time =IOHelper.functionaltimes_readwrite(**cfg)\n\n time['write'][:] *= 1e9\n time['read'][:] *= 1e9\n ti = int(time['idx_ti'])\n tf = int(time['idx_tf'])\n functime = time['read'][ti:tf] \n dt = float(time['delta_t'])\n\n filename =IOHelper.getVectorOverlap(**cfg)\n reGamma,imGamma=sp.loadtxt(filename).T \n alphaR =reGamma[0:nRead] -1j*imGamma[0:nRead]\n alphaD =reGamma[nRead:nDown]-1j*imGamma[nRead:nDown]\n alphaU =reGamma[nDown:nUp] -1j*imGamma[nDown:nUp]\n ### read data ###\n\n ### plotting\n Reg1Up = sp.dot(alphaU.conj(),cavityWrite)\n Reg1Down = sp.dot(alphaD.conj(),cavityWrite)\n\n Reg2Down = sp.dot(alphaD.conj(),cavityMemo)\n Reg2Up = sp.dot(alphaU.conj(),cavityMemo)\n Reg2Read = sp.dot(alphaR.conj(),cavityRead)\n\n Reg2DownRead = Reg2Down + Reg2Read\n Reg2UpRead = Reg2Up + Reg2Read\n\n spos = sp.zeros([2,thetaCnt,phiCnt],complex)\n FuncInfoOlap = sp.zeros([2,thetaCnt,phiCnt],complex)\n gamma = sp.zeros([thetaCnt,phiCnt],complex)\n delta = sp.zeros([thetaCnt,phiCnt],complex)\n\n# I00 = 1j*cumtrapz( (Reg2Down[ti:tf].conj() * Reg2Down[ti:tf]).imag, x=None, dx=dt )[-1]\n I00 = cumtrapz( (Reg2Down[ti:tf] * Reg2Down[ti:tf].conj()).real, x=None, dx=dt )[-1]\n I01 = 1j*cumtrapz( (Reg2Up [ti:tf] * Reg2Down[ti:tf].conj()).imag, x=None, dx=dt )[-1]\n I01 += cumtrapz( (Reg2Up [ti:tf] * Reg2Down[ti:tf].conj()).real, x=None, dx=dt )[-1]\n I0R = 1j*cumtrapz( (Reg2Read[ti:tf] * Reg2Down[ti:tf].conj()).imag, x=None, dx=dt )[-1]\n I0R += cumtrapz( (Reg2Read[ti:tf] * Reg2Down[ti:tf].conj()).real, x=None, dx=dt )[-1]\n\n# I11 = 1j*cumtrapz( (Reg2Up [ti:tf].conj() * Reg2Up [ti:tf]).imag, x=None, dx=dt )[-1]\n I11 = cumtrapz( (Reg2Up [ti:tf] * Reg2Up [ti:tf].conj()).real, x=None, dx=dt )[-1]\n I10 = 1j*cumtrapz( (Reg2Down[ti:tf] * Reg2Up [ti:tf].conj()).imag, x=None, dx=dt )[-1]\n I10 += cumtrapz( (Reg2Down[ti:tf] * Reg2Up [ti:tf].conj()).real, x=None, dx=dt )[-1]\n I1R = 1j*cumtrapz( (Reg2Read[ti:tf] * Reg2Up [ti:tf].conj()).imag, x=None, dx=dt )[-1]\n I1R 
+= cumtrapz( (Reg2Read[ti:tf] * Reg2Up [ti:tf].conj()).real, x=None, dx=dt )[-1]\n\n for i in sp.arange(0.0,thetaCnt):\n theta = i/(thetaCnt-1.0) # from 0 to 1 * pi\n \n for j in sp.arange(0.0,phiCnt):\n phi = j/(phiCnt-1.0)*2.0 # from 0 to 2 * pi\n \n spos[0,i,j] = sp.cos(theta*sp.pi/2.0) \n spos[1,i,j] = sp.sin(theta*sp.pi/2.0)*sp.exp(1j*phi*sp.pi)\n\n if fortranCheck == 1:\n cmd = \"./scripts/ifort-checkBloch.sh \" + wd\n print (\"compile fortran routines: \"+cmd)\n call(cmd.split())\n\n print (\"### call checkBloch\")\n cmd=wd+\"checkBloch\"\n generateSuperposition = Popen(cmd.split(), stdin=PIPE) # run fortran program with piped standard input\n cmd = \"echo {:}\".format(thetaCnt) # communication with fortran-routine: chose superposition parameter\n generateInput = Popen(cmd.split(), stdout=generateSuperposition.stdin) # send action to fortran program\n cmd = \"echo {:}\".format(phiCnt) # communication with fortran-routine: chose superposition parameter\n generateInput = Popen(cmd.split(), stdout=generateSuperposition.stdin) # send action to fortran program\n output = generateSuperposition.communicate()[0]\n generateInput.wait()\n\n FuncInfoOlap [0,:,:],FuncInfoOlap [1,:,:]=IOHelper.read_MtrxProjection(thetaCnt,phiCnt,**cfg['FILES'])\n\n else:\n for i in sp.arange(0.0,thetaCnt): \n for j in sp.arange(0.0,phiCnt):\n FuncCavity = spos[0,i,j]*Reg2Down[ti:tf] + spos[1,i,j]*Reg2Up[ti:tf]\n FuncCavity[:] += Reg2Read [ti:tf]\n\n FuncInfoOlap [0,i,j] = cumtrapz( (FuncCavity[:] * Reg2Down[ti:tf].conj()).real, x=None, dx=dt )[-1]\n FuncInfoOlap [0,i,j] += 1j*cumtrapz( (FuncCavity[:] * Reg2Down[ti:tf].conj()).imag, x=None, dx=dt )[-1]\n\n FuncInfoOlap [1,i,j] = cumtrapz( (FuncCavity[:] * Reg2Up[ti:tf].conj()).real, x=None, dx=dt )[-1]\n FuncInfoOlap [1,i,j] += 1j*cumtrapz( (FuncCavity[:] * Reg2Up[ti:tf].conj()).imag, x=None, dx=dt )[-1]\n\n if minOlap==0:\n gamma[:,:]=((FuncInfoOlap [0,:,:]-I0R)*I11+(I1R-FuncInfoOlap [1,:,:])*I01)/(I11*I00-I01*I10)\n delta[:,:]=((FuncInfoOlap [1,:,:]-I1R)*I00+(I0R-FuncInfoOlap [0,:,:])*I10)/(I11*I00-I01*I10)\n else :\n gamma[:,:]= (FuncInfoOlap [0,:,:]-I0R)/I00\n delta[:,:]= (FuncInfoOlap [1,:,:]-I1R)/I11\n\n fs = 22\n label_size = 12\n plt.rcParams['xtick.labelsize'] = label_size \n plt.rcParams['ytick.labelsize'] = label_size \n plt.rcParams['xtick.major.pad']='20'\n plt.rcParams['ytick.major.pad']='20'\n fig = plt.figure()\n\n if bloch == 0:\n xx, yy = sp.meshgrid(sp.linspace(0.0,2.0,phiCnt),sp.linspace(0.0,1.0,thetaCnt))\n\n zmin = 0.0\n zmax = +1.0\n\n zzOlapR0 = gamma [:,:].real\n zzOlapI0 = gamma [:,:].imag\n zzErr0 = sp.absolute(gamma[:,:]-spos[0,:,:])\n\n zzOlapR1 = delta [:,:].real\n zzOlapI1 = delta [:,:].imag\n zzErr1 = sp.absolute(delta[:,:]-spos[1,:,:]) \n\n fig1 = fig.add_subplot(321, projection='3d')\n fig1.plot_surface(xx, yy, zzOlapR0, rstride=gridR, cstride=gridC, cmap=myMap, alpha=0.5,zorder=11.0,vmin=zzOlapR0.min(), vmax=zzOlapR0.max())\n fig1.set_zlim(0,1)\n# fig1.set_title(\"Re$[\\,\\gamma_R\\,]\\\\approx\\cos(\\\\theta/2)$\",fontsize=fs)\n fig1.set_title(\"Re$[\\,\\gamma_R\\,]$\",fontsize=fs)\n fig1.set_ylabel(\"$\\\\theta_W/\\pi$\",fontsize=fs)\n fig1.set_xlabel(\"$\\phi_W / \\pi$\",fontsize=fs)\n\n fig1 = fig.add_subplot(323, projection='3d')\n fig1.plot_surface(xx, yy, zzOlapI0, rstride=gridR, cstride=gridC, cmap=myMap, alpha=0.5,zorder=11.0,vmin=zzOlapI0.min(), vmax=zzOlapI0.max())\n# fig1.set_zlim(-0.01,0.01)\n# fig1.set_title(\"Im$[\\,\\gamma_R\\,]\\\\approx\\cos(\\\\theta/2)$\",fontsize=fs)\n 
fig1.set_title(\"Im$[\\,\\gamma_R\\,]$\",fontsize=fs)\n fig1.set_ylabel(\"$\\\\theta_W/\\pi$\",fontsize=fs)\n fig1.set_xlabel(\"$\\phi_W/\\pi$\",fontsize=fs)\n\n fig1 = fig.add_subplot(322, projection='3d')\n fig1.plot_surface(xx, yy, zzOlapR1, rstride=gridR, cstride=gridC, cmap=myMap, alpha=0.5,zorder=11.0,vmin=zzOlapR1.min(), vmax=zzOlapR1.max())\n fig1.set_zlim(-1,1)\n# fig1.set_title(\"Re$[\\,\\delta_R\\,]\\\\approx\\sin(\\\\theta/2)\\cos(\\phi)$\",fontsize=fs)\n fig1.set_title(\"Re$[\\,\\delta_R\\,]$\",fontsize=fs)\n fig1.set_ylabel(\"$\\\\theta_W/\\pi$\",fontsize=fs)\n fig1.set_xlabel(\"$\\phi_W / \\pi$\",fontsize=fs)\n\n fig1 = fig.add_subplot(324, projection='3d')\n fig1.plot_surface(xx, yy, zzOlapI1, rstride=gridR, cstride=gridC, cmap=myMap, alpha=0.5,zorder=11.0,vmin=zzOlapI1.min(), vmax=zzOlapI1.max())\n fig1.set_zlim(-1,1)\n# fig1.set_title(\"Im$[\\,\\delta_R\\,]\\\\approx\\sin(\\\\theta/2)\\sin(\\phi)$\",fontsize=fs)\n fig1.set_title(\"Im$[\\,\\delta_R\\,]$\",fontsize=fs)\n fig1.set_ylabel(\"$\\\\theta_W/\\pi$\",fontsize=fs)\n fig1.set_xlabel(\"$\\phi_W/\\pi$\",fontsize=fs)\n\n fig1 = fig.add_subplot(325, projection='3d')\n fig1.plot_surface(xx, yy, zzErr0, rstride=gridR, cstride=gridC, cmap=myMap, alpha=0.5,zorder=11.0,vmin=zzErr0.min(), vmax=zzErr0.max())\n# fig1.set_zlim(-1,1)\n# fig1.set_title(\"Re$[\\,\\delta_R\\,]\\\\approx\\sin(\\\\theta/2)\\cos(\\phi)$\",fontsize=fs)\n fig1.set_title(\"$\\epsilon_\\gamma$\",fontsize=fs)\n fig1.set_ylabel(\"$\\\\theta_W/\\pi$\",fontsize=fs)\n fig1.set_xlabel(\"$\\phi_W / \\pi$\",fontsize=fs)\n\n\n fig1 = fig.add_subplot(326, projection='3d')\n fig1.plot_surface(xx, yy, zzErr1, rstride=gridR, cstride=gridC, cmap=myMap, alpha=0.5,zorder=11.0,vmin=zzErr1.min(), vmax=zzErr1.max())\n# fig1.set_zlim(-1,1)\n# fig1.set_title(\"Re$[\\,\\delta_R\\,]\\\\approx\\sin(\\\\theta/2)\\cos(\\phi)$\",fontsize=fs)\n fig1.set_title(\"$\\epsilon_\\delta$\",fontsize=fs)\n fig1.set_ylabel(\"$\\\\theta_W/\\pi$\",fontsize=fs)\n fig1.set_xlabel(\"$\\phi_W / \\pi$\",fontsize=fs)\n\n else:\n \n# x_expect = 2e0*sp.real(FuncInfoOlap [0,:,:]*FuncInfoOlap [1,:,:])\n# \n\n x = 2e0*sp.real(spos[0,:,:].conj()*spos [1,:,:])\n y = 2e0*sp.real(spos[0,:,:].conj()*spos [1,:,:]/1.0j)\n z = sp.absolute(spos[0,:,:])**2 -sp.absolute(spos [1,:,:])**2\n\n xprime = 2e0*sp.real(gamma [:,:].conj()*delta [:,:])\n yprime = 2e0*sp.real(gamma [:,:].conj()*delta [:,:]/1.0j)\n zprime = sp.absolute(gamma [:,:])**2 -sp.absolute(delta [:,:])**2\n \n myDensity = sp.sqrt(sp.absolute(x-xprime)**2 + sp.absolute(y-yprime)**2 + sp.absolute(z-zprime)**2)\n# myDensity = 1.0 - (x*xprime + y*yprime + z*zprime) \n \n cm = plt.cm.get_cmap(myMap) \n myMin = myDensity.min()\n myMax = max(sp.absolute(myDensity.max()),sp.absolute(myDensity.min()))\n myColors = cm(myDensity/myMax)\n m = plt.cm.ScalarMappable(cmap=myMap)\n\n fig3D = fig.add_subplot(1, 1, 1, projection='3d')\n\n surf = fig3D.plot_surface(xprime, yprime, zprime, rstride=1, cstride=1, linewidth=1, color=\"black\", \n facecolors=myColors,shade=False,antialiased=True,\n vmin=myMin, vmax=myMax)\n# fig3D.plot_wireframe(x*1.01, y*1.01, z*1.01, rstride=1, cstride=1,alpha=1,linewidth=1,color=\"black\")\n tick2=(myMin+myMax)/2.0\n tick1=round_sig(myMin+(myMax-myMin)*0.1,2)\n tick3=round_sig(myMax-(myMax-myMin)*0.1,2)\n tick2=round_sig(tick2,2)\n print \"### error boundaries:\", tick1,tick2,tick3\n\n m.set_array(myDensity)\n cb= plt.colorbar(m,shrink=0.5,aspect=7,ticks=([tick1,tick2,tick3])) \n cb.formatter.set_scientific(True) \n 
cb.formatter.set_powerlimits((0, 0))\n cb.update_ticks()\n \n fig3D.set_xlabel(\"$\\langle \\sigma_x(\\gamma^\\prime,\\delta^\\prime) \\\\rangle$\", fontsize=fs)\n fig3D.set_ylabel(\"$\\langle \\sigma_y(\\gamma^\\prime,\\delta^\\prime) \\\\rangle$\", fontsize=fs)\n fig3D.set_zlabel(\"$\\langle \\sigma_z(\\gamma^\\prime,\\delta^\\prime) \\\\rangle$\", fontsize=fs)\n \n fig3D.set_xticks([-1,0,1])\n fig3D.set_xlim([-1.01,1.01])\n fig3D.xaxis._axinfo['label']['space_factor'] = 2.0\n\n fig3D.set_yticks([-1,0,1])\n fig3D.set_ylim([-1.01,1.01])\n fig3D.yaxis._axinfo['label']['space_factor'] = 2.0\n\n fig3D.set_zticks([-1,0,1])\n fig3D.set_zlim([-1.01,1.01])\n fig3D.zaxis._axinfo['label']['space_factor'] = 2.0\n\n \n plt.show()\n\n ### plotting\n\n\n### main routine #########################################################################\n##########################################################################################\n\n\nif __name__ == '__main__':\n argh.dispatch_command(main_routine) \n\n\n\n" }, { "alpha_fraction": 0.564007043838501, "alphanum_fraction": 0.6025289297103882, "avg_line_length": 39.145668029785156, "blob_id": "3635f5170b7e8f5b848e90a953eb4a7a481082cb", "content_id": "6771d9307ee4eb3f561d4876ccb855c6bf8c1341", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 10202, "license_type": "no_license", "max_line_length": 130, "num_lines": 254, "path": "/python/SmallestOverlapTimedelay.py", "repo_name": "bhartl/optimal-control", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python2.7\n#-*- coding:utf-8 -*-\nimport argh\nimport os, re, sys\nimport ConfigParser as cp\nimport scipy as sp\nfrom scipy.integrate import cumtrapz\nfrom math import acos,asin\n\nfrom mpl_toolkits.mplot3d import axes3d\nfrom matplotlib import cm\nfrom matplotlib.ticker import LinearLocator, FormatStrFormatter\nimport matplotlib.pyplot as plt\n\nimport IOHelper\n\n\n##########################################################################################\n### main routine #########################################################################\n\ndef pltFunction (cavity1,cavity2,cavity3,plotType):\n if (plotType==0):\n return sp.absolute(cavity1[:])**2,sp.absolute(cavity2[:])**2,sp.absolute(cavity3[:])**2\n elif (plotType==1):\n return sp.real(cavity1[:]),sp.real(cavity2[:]),sp.real(cavity3[:])\n elif (plotType==2):\n return sp.imag(cavity1[:]),sp.imag(cavity2[:]),sp.imag(cavity3[:])\n else: \n return cavity1, cavity2, cavity3\n\n### check for arguments, g: generate data, r: read data, in both ways: generate matrices ###\ndef main_routine (wd=\"./\",cfg=\"./python/parameter.cfg\",cnt=11,delay=27.725,sptype=0,phase=0.0):\n\n ### read config file ###\n print (\"load from config file: \" + cfg)\n\n configParser = cp.ConfigParser()\n configParser.read(cfg)\n print (configParser.sections())\n cfg=configParser.__dict__['_sections'].copy() \n\n #for src, target in cfg['NVSETUP'].items():\n # print(src + \" : \" + target)\n omega_c = float(cfg['NVSETUP']['{omega_c}'])\n\n nWrite=int(cfg['OCFourier']['{write_harmonic}'])\n nRead =int(cfg['OCFourier']['{read_harmonic}'])\n nStore=int(cfg['MEFourier']['{storage_harmonic}'])\n\n nDown =nRead+nWrite\n nUp =nDown+nWrite\n ### read config file ###\n\n ### read data ### \n cavityWrite,cavityMemo,cavityRead =IOHelper.harmonics_readwrite(**cfg)\n time =IOHelper.functionaltimes_readwrite(**cfg)\n\n time['write'][:] *= 1e9\n time['read'][:] *= 1e9\n ti = int(time['idx_ti'])\n tf = int(time['idx_tf'])\n functime = 
time['read'][ti:tf] \n dt = float(time['delta_t'])\n\n filename =IOHelper.getVectorOverlap(**cfg)\n reGamma,imGamma=sp.loadtxt(filename).T \n alphaR =reGamma[0:nRead] -1j*imGamma[0:nRead]\n alphaD =reGamma[nRead:nDown]-1j*imGamma[nRead:nDown]\n alphaU =reGamma[nDown:nUp] -1j*imGamma[nDown:nUp]\n ### read data ###\n\n ### plotting\n Reg1Up = sp.dot(alphaU.conj(),cavityWrite)\n Reg1Down = sp.dot(alphaD.conj(),cavityWrite)\n\n Reg2Down = sp.dot(alphaD.conj(),cavityMemo)\n Reg2Up = sp.dot(alphaU.conj(),cavityMemo)\n Reg2Read = sp.dot(alphaR.conj(),cavityRead)\n\n Reg2DownRead = Reg2Down + Reg2Read\n Reg2UpRead = Reg2Up + Reg2Read\n\n FuncInfo = sp.zeros([3,cnt])\n FuncInfoPhase = sp.zeros([3,cnt])\n FuncShiftRead = sp.zeros([3,cnt,functime.size],complex)\n\n denom = sp.zeros([3])\n# denom[0] = cumtrapz( sp.absolute(Reg2DownRead[ti:tf])**2, x=None, dx=dt )[-1] \n# denom[1] = cumtrapz( sp.absolute(Reg2UpRead[ti:tf])**2, x=None, dx=dt )[-1] \n\n denom[0] = cumtrapz( sp.absolute(Reg2UpRead[ti:tf]+Reg2DownRead[ti:tf])**2, x=None, dx=dt )[-1] \n denom[1] = cumtrapz( sp.real(Reg2UpRead[ti:tf]+Reg2DownRead[ti:tf]), x=None, dx=dt )[-1] \n denom[2] = cumtrapz( sp.imag(Reg2UpRead[ti:tf]+Reg2DownRead[ti:tf]), x=None, dx=dt )[-1] \n\n# denom[0] = sp.absolute[denom[0]]\n# denom[1] = sp.absolute[denom[1]]\n# denom[2] = sp.absolute[denom[2]]\n\n for i in sp.arange(0.0,cnt):\n myDelay = i/(cnt-1.0)*delay\n \n\n shift=sp.absolute(time['read'][:]-(functime[0]-myDelay)).argmin()-1\n print myDelay,functime[0],functime[0]-myDelay,shift,time['read'][shift]\n\n \n# FuncShiftRead[0,i,:] = Reg2DownRead[shift:shift+functime.size]\n FuncShiftRead[0,i,:] = sp.exp(-1j*phase*sp.pi)*Reg2Down[shift:shift+functime.size] + Reg2Read[shift:shift+functime.size]\n FuncShiftRead[1,i,:] = Reg2UpRead [ti:tf]\n FuncShiftRead[2,i,:] = FuncShiftRead[0,i,:] + FuncShiftRead[1,i,:]\n\n\n FuncInfo[0,i] = cumtrapz( sp.absolute(FuncShiftRead[2,i,:])**2, x=None, dx=dt )[-1]\n FuncInfo[0,i] /= sp.absolute(denom[0])\n\n FuncInfo[1,i] = cumtrapz( sp.real(FuncShiftRead[2,i,:]), x=None, dx=dt )[-1]\n FuncInfo[1,i] /= sp.absolute(denom[1])\n\n FuncInfo[2,i] = cumtrapz( sp.imag(FuncShiftRead[2,i,:]), x=None, dx=dt )[-1]\n FuncInfo[2,i] /= sp.absolute(denom[2])\n\n# FuncInfoIntegrand = sp.absolute(FuncSuperRead[i,:]) * sp.absolute(Reg2UpRead[ti:tf])\n# FuncInfo[i,1] = cumtrapz( FuncInfoIntegrand, x=None, dx=dt )[-1]/denom[1]\n\n# FuncInfoIntegrand = FuncSuperRead[i,:].conj() * Reg2UpRead[ti:tf]\n# FuncInfoPhase[i,0] = cumtrapz( FuncInfoIntegrand.real, x=None, dx=dt )[-1]/denom[1]\n# FuncInfoPhase[i,1] = cumtrapz( FuncInfoIntegrand.imag, x=None, dx=dt )[-1]/denom[1]\n\n\n xx, yy = sp.meshgrid(functime,sp.linspace(0.0,delay,cnt))\n xx0 = sp.linspace(0.0,delay,cnt)\n\n# zzR = FuncSuperRead.real/FuncSuperRead.real.max()\n# zzI = FuncSuperRead.imag/FuncSuperRead.imag.max()\n zz0A = sp.absolute(FuncShiftRead[0,:,:])**2/((sp.absolute(FuncShiftRead[0:1,:,:])**2).max())\n zz1A = sp.absolute(FuncShiftRead[1,:,:])**2/((sp.absolute(FuncShiftRead[0:1,:,:])**2).max())\n zzDA = sp.absolute(FuncShiftRead[2,:,:])**2/((sp.absolute(FuncShiftRead[0:1,:,:])**2).max())\n\n zmin = -1.5\n zmax = +1.0\n fs = 20\n\n fig = plt.figure()\n\n if sptype==0: # 3d plotting\n fig0 = fig.add_subplot(231, projection='3d')\n fig0.plot_surface(xx, yy, zz0A, rstride=1, cstride=5, cmap=\"Blues\", alpha=0.5,zorder=11.0,vmin=-1, vmax=1)\n fig0.contourf(xx, yy, zz0A, zdir='z', offset=zmin, cmap=\"Blues\", vmin=-1, vmax=1,zorder=1.0)\n fig0.set_zlim(zmin,zmax)\n fig0.set_title(\"a) normalized 
$|A_0(t-\\Delta t)|^2$\",fontsize=fs)\n fig0.set_xlabel(\"$t$ in ns\",fontsize=fs)\n fig0.set_ylabel(\"$\\Delta t$ in ns.\",fontsize=fs)\n\n fig1 = fig.add_subplot(232, projection='3d')\n fig1.plot_surface(xx, yy, zz1A, rstride=1, cstride=5, cmap=\"Reds\", alpha=0.5,zorder=11.0,vmin=-1, vmax=1)\n fig1.contourf(xx, yy, zz1A, zdir='z', offset=zmin, cmap=\"Reds\", vmin=-1, vmax=1,zorder=1.0)\n fig1.set_zlim(zmin,zmax)\n fig1.set_title(\"b) normalized $|A_1(t)|^2$\",fontsize=fs)\n fig1.set_xlabel(\"$t$ in ns\",fontsize=fs)\n fig1.set_ylabel(\"$\\Delta t$ in ns.\",fontsize=fs)\n\n fig2 = fig.add_subplot(233, projection='3d')\n fig2.plot_surface(xx, yy, zzDA, rstride=1, cstride=5, cmap=\"Greens\", alpha=0.5,zorder=11.0,vmin=-1, vmax=4)\n fig2.contourf(xx, yy, zzDA, zdir='z', offset=zmin, cmap=\"Greens\", vmin=-1, vmax=4,zorder=1.0)\n fig2.set_zlim(zmin,4*zmax)\n fig2.set_title(\"c) normalized $|A_0(t-\\Delta t)-A_1(t)|^2$\",fontsize=fs)\n fig2.set_xlabel(\"$t$ in ns\",fontsize=fs)\n fig2.set_ylabel(\"$\\Delta t$ in ns.\",fontsize=fs)\n else :\n pltId=(cnt-1)/2\n yMax = round(max(max(zzDA[0,:]),max(zzDA[pltId,:]),max(zzDA[-1,:]))) + 0.5\n yMin = min(min(zzDA[0,:]),min(zzDA[pltId,:]),min(zzDA[-1,:]))\n\n plt.subplot2grid((2,3),(0,0))\n plt.title(\"a) no delay, $\\Delta t=0$\",fontsize=fs)\n plt.plot(functime,zzDA[0,:],linewidth=\"2\",color=\"black\")\n plt.xlabel(\"$t$ in ns\",fontsize=fs)\n plt.ylabel(\"$|A_0(t-\\Delta t) + A_1(t)|^2$ in a.u.\",fontsize=fs)\n plt.xlim(min(functime),max(functime))\n plt.ylim(yMin,yMax)\n \n\n plt.subplot2grid((2,3),(0,1))\n plt.title(\"b) medium delay, $\\Delta t=\\Delta t_{max}/2=$\"+\"{:}ns\".format(pltId/(cnt-1.0)*delay),fontsize=fs)\n plt.plot(functime,zzDA[pltId,:],linewidth=\"2\",color=\"black\")\n plt.xlabel(\"$t$ in ns\",fontsize=fs)\n plt.xlim(min(functime),max(functime))\n plt.ylim(yMin,yMax)\n \n plt.subplot2grid((2,3),(0,2))\n plt.title(\"c) maximal delay, $\\Delta t=\\Delta t_{max}=$\"+\"{:}ns\".format(delay),fontsize=fs)\n plt.plot(functime,zzDA[-1,:],linewidth=\"2\",color=\"black\")\n plt.xlabel(\"$t$ in ns\",fontsize=fs)\n plt.xlim(min(functime),max(functime))\n plt.ylim(yMin,yMax)\n\n\n plt.subplot2grid((2,3),(1,0))\n plt.plot(xx0,FuncInfo[1,:],linewidth=\"2\",color=\"red\")\n plt.title(\"d) area under $Re[A_0(t-\\Delta t)+A_1(t)]$\",fontsize=fs)\n plt.xlabel(\"$\\Delta t$ in ns\",fontsize=fs)\n plt.xlim(min(xx0),max(xx0))\n\n plt.subplot2grid((2,3),(1,1))\n plt.plot(xx0,FuncInfo[2,:],linewidth=\"2\",color=\"magenta\")\n plt.title(\"e) area under $Im[A_0(t-\\Delta t)+A_1(t)]$\",fontsize=fs)\n plt.xlabel(\"$\\Delta t$ in ns\",fontsize=fs)\n plt.xlim(min(xx0),max(xx0))\n\n plt.subplot2grid((2,3),(1,2))\n plt.plot(xx0,FuncInfo[0,:],linewidth=\"2\",color=\"green\")\n plt.title(\"f) area under $|A_0(t-\\Delta t)+A_1(t)|^2$\",fontsize=fs)\n plt.xlabel(\"$\\Delta t$ in ns\",fontsize=fs)\n plt.xlim(min(xx0),max(xx0))\n\n# FuncInfoPhase[:,0]-=min(FuncInfoPhase[:,0])\n# FuncInfoPhase[:,0]/=0.5*max(sp.absolute(FuncInfoPhase[:,0]))\n# FuncInfoPhase[:,0]-=1.0\n\n# FuncInfoPhase[:,1]-=min(FuncInfoPhase[:,1])\n# FuncInfoPhase[:,1]/=0.5*max(sp.absolute(FuncInfoPhase[:,1]))\n# FuncInfoPhase[:,1]-=1.0\n\n# plt.subplot2grid((2,3),(1,2),colspan=1,rowspan=1)\n# plt.plot(sp.linspace(0.0,2.0,cnt),FuncInfoPhase[:,0],linewidth=\"2\",color=\"red\")\n# plt.plot(sp.linspace(0.0,2.0,cnt),FuncInfoPhase[:,1],linewidth=\"2\",color=\"magenta\")\n# plt.title(\"f) $O_{\\mathbb{C}}$ scaled & translated, $(O_{\\mathbb{C}}($'$1$'$)-I_R)/I_0$\",fontsize=fs)\n## 
plt.title(\"$\\\\frac{1}{N}\\int_{T_{{\\cal F}1}}^{T_{{\\cal F}2}}dt\\,A_0(t;\\phi_0)^*\\cdot A_1(t)$ scaled & translated\",fontsize=fs)\n# plt.xlabel(\"$\\phi_0/\\pi$\",fontsize=fs)\n# plt.ylim(-1,1)\n\n## myPhase = sp.zeros([cnt,2])\n## for i in sp.arange(0,cnt):\n## myPhase[i,0]=acos(FuncInfoPhase[i,0])/sp.pi\n## myPhase[i,1]=acos(FuncInfoPhase[i,1])/sp.pi\n\n## plt.subplot2grid((2,3),(1,2),colspan=1,rowspan=1)\n## plt.plot(sp.linspace(0.0,2.0,cnt),myPhase[:,0])\n## plt.plot(sp.linspace(0.0,2.0,cnt),myPhase[:,1])\n# \n\n## fig0.plot(memo_times[:], fitness[:,conf[pltId]], zs=tmax, zdir='y',lw=1.5, color=\"green\")\n\n plt.show()\n\n ### plotting\n\n\n### main routine #########################################################################\n##########################################################################################\n\n\nif __name__ == '__main__':\n argh.dispatch_command(main_routine) \n\n\n\n" }, { "alpha_fraction": 0.39331138134002686, "alphanum_fraction": 0.4051854610443115, "avg_line_length": 40.56192398071289, "blob_id": "60948f8d046be1d6f74c86e820b8318b5f169a88", "content_id": "789f9d91035f5f4f86ebc90f79f61c59f2cd40df", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 22486, "license_type": "no_license", "max_line_length": 114, "num_lines": 541, "path": "/python/MemoryPulseFunctional.py", "repo_name": "bhartl/optimal-control", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python2.7\n#-*- coding:utf-8 -*-\nimport os, re, sys\nimport ConfigParser as cp\nimport scipy as sp\nimport scipy.linalg as la\n\nfrom scipy.optimize import minimize\n\n##########################################################################################\n### globals ##############################################################################\n##########################################################################################\n\n##########################################################################################\niteration=0\n\ncons=dict()\nfun =dict()\ndim =dict()\n\nconf=dict()\nconf['entries'] = 8\nconf['funval'] =-1\nconf['fidelity_down'] =-2\nconf['fidelity_up'] =-3\nconf['memolap'] =-4\nconf['alpha'] =-5\nconf['beta'] =-6\nconf['success'] =-7\nconf['fitness'] =-8\n\ndef getName(nameId):\n myname = ''\n\n if nameId =='funval':\n myname=\"functional value\"\n elif nameId=='fidelity_down':\n myname=\"fidelity \\\"$0$\\\"\"\n elif nameId=='fidelity_up':\n myname=\"fidelity \\\"$1$\\\"\"\n elif nameId=='memolap':\n myname=\"overlap \\\"$0$\\\" and \\\"$1$\\\"\"\n elif nameId=='alpha':\n myname=\"norm $\\\\alpha$\"\n elif nameId=='beta':\n myname=\"norm $\\\\beta$\"\n elif nameId=='success':\n myname=\"variation success\"\n\n return myname\n##########################################################################################\n\n\n##########################################################################################\n### constraints and functionals ##########################################################\n##########################################################################################\n\n##########################################################################################\ndef getX(alpha,beta):\n global dim,conf\n if (conf['useBeta']):\n x =sp.zeros([2*dim['alpha']+2]) # map complex valued vector to real valued vector\n x[-2] =beta.real # corresp. x0[dim['alpha']]\n x[-1] =beta.imag # corresp. 
to x0[dim['alpha']+1]\n else:\n x =sp.zeros([2*dim['alpha']]) # map complex valued vector to real valued vector\n\n x[ :2*dim['alpha']:2] =alpha[:].real # odds: 1,3,5,... <-> real part\n x[1:2*dim['alpha']:2] =alpha[:].imag # even: 2,4,6,... <-> imag part\n\n return x\n##########################################################################################\n\n##########################################################################################\ndef getAlpha(x):\n global dim\n return x[:2*dim['alpha']:2]+1j*x[1:2*dim['alpha']:2] # extract vector alpha out of variational vector x \n##########################################################################################\n\n##########################################################################################\ndef getBeta(x):\n global conf\n if (conf['useBeta']):\n return x[-2]+1j*x[-1] # extract pahse beta out of variational vector x\n else:\n return 1.0+0j\n##########################################################################################\n\n##########################################################################################\ndef constraintNormAlpha (x):\n \"\"\" norm of reading vector must equal the reading amplitude \"\"\"\n global cons\n return cons['alpha_norm']**2 - la.norm(getAlpha(x))**2\n##########################################################################################\n\n##########################################################################################\ndef constraintBetaLow (x):\n \"\"\" constraint the lower bound of the length of 'phase' beta, beta_low < |beta| \"\"\"\n global cons\n return sp.absolute(getBeta(x))-cons['beta_low']\n##########################################################################################\n\n##########################################################################################\ndef constraintBetaTop (x):\n \"\"\" constraint the upper bound of the length of 'phase' beta, beta_top > |beta| \"\"\"\n global cons\n return cons['beta_top']-sp.absolute(getBeta(x))\n##########################################################################################\n\n##########################################################################################\ndef constraintCavityT2 (x,statekey):\n \"\"\" constraint for the the cavity mode ... |A(T2)| should match |A(T2+TS)| \"\"\"\n global dim,cons,conf\n\n gamma = sp.ones([dim['alpha']+2],complex)\n gamma[:dim['alpha']] = getAlpha(x)\n gamma[-1] = getBeta(x)\n\n my_state = sp.dot(gamma.conj().T,cons[statekey])\n my_original = gamma[-1].conj()*cons[statekey][-1]\n\n if ( conf['cavityMatch'] == 0) :\n my_state=my_state-my_original # everything but last element of scalar product\n return sp.absolute(my_original)**2-sp.absolute(my_state)**2\n else :\n return sp.absolute(my_state)\n\ndef constraintCavityT2_up (x):\n \"\"\" constraint for the the cavity mode ... |A(T2)| should match |A(T2+TS)| \"\"\"\n return constraintCavityT2(x,'vecT2_up')\n\ndef constraintCavityT2_down (x):\n \"\"\" constraint for the the cavity mode ... 
|A(T2)| should match |A(T2+TS)| \"\"\"\n return constraintCavityT2(x,'vecT2_down')\n##########################################################################################\n\n##########################################################################################\ndef constraintFidelity_up (x):\n global dim,cons\n return sp.absolute(1.0-evaluateFidelity(cons['fidelity_up'],x,dim['alpha']))\n##########################################################################################\n\n##########################################################################################\ndef constraintFidelity_down (x):\n global dim,cons\n return sp.absolute(1.0-evaluateFidelity(cons['fidelity_down'],x,dim['alpha']))\n##########################################################################################\n\n##########################################################################################\ndef functionalFidelity(x):\n global conf\n myFunc = 0.0\n if ( conf['toMinimize'] == 1) :\n myFunc = constraintFidelity_up(x)\n elif ( conf['toMinimize'] == 0) :\n myFunc = constraintFidelity_down(x)\n else : \n myFunc = constraintFidelity_up(x) + constraintFidelity_down(x)\n\n return myFunc\n##########################################################################################\n\n##########################################################################################\ndef evaluateFidelity(fidelity_vector,x,dimAlpha):\n gamma = sp.ones([dimAlpha+2],complex)\n gamma[:dimAlpha] = getAlpha(x)\n gamma[-2] = getBeta (x)\n gamma[-1] = 1.0+0j\n return sp.absolute(sp.dot(gamma.conj().T,fidelity_vector))/sp.absolute(gamma[-2])\n##########################################################################################\n\n##########################################################################################\ndef evaluateMemOlap (x):\n global dim,cons\n gamma = sp.ones([dim['alpha']+2],complex)\n gamma[:dim['alpha']] = getAlpha(x)\n gamma[-2] = getBeta (x)\n gamma[-1] = 1.0+0j\n return sp.absolute(sp.dot(gamma.conj().T,sp.dot(cons['mtrxMemOlap'],gamma)))\n##########################################################################################\n\n##########################################################################################\ndef chiSquared (x):\n \"\"\" evaluates chiSquared measure sum_k |B_k(T1)-B_k(T2)|^2 of the spin ensemble \"\"\"\n global dim,fun,conf\n \n gamma = sp.zeros([dim['total']],complex)\n gamma[:dim['alpha']] = getAlpha(x)\n gamma[-1] = getBeta(x)\n\n if ( conf['toMinimize'] == 1) :\n myFunc = fun['mtrxBeta_up']\n elif ( conf['toMinimize'] == 0) :\n myFunc = fun['mtrxBeta_down']\n else : \n myFunc = fun['mtrxBeta_up'] + fun['mtrxBeta_down']\n\n return (sp.dot(gamma.conj().T,sp.dot(myFunc,gamma))).real\n##########################################################################################\n\n##########################################################################################\ndef functionalMemOlapChi2 (x):\n return chiSquared(x)+evaluateMemOlap(x)\n##########################################################################################\n\n##########################################################################################\n### TODO ... 
check this\ndef constraintHoleValue(x):\n return constraintHole(x,'value')\n\ndef constraintHoleSlope(x):\n return constraintHole(x,'slope')\n\ndef constraintHoleCurv(x):\n return constraintHole(x,'curv')\n\ndef constraintHole(x,key):\n global cons,dim,conf\n\n gamma = sp.zeros([dim['alpha']+1],complex)\n gamma[:dim['alpha']] = getAlpha(x)\n gamma[-1] = 1.0+0j\n\n holeD = cons['hole'][key][0:dim['alpha']+1,:]\n\n holeU = cons['hole'][key][0:dim['alpha']+1,:]\n holeU[-1,:]= cons['hole'][key][-1,:]\n\n myDown = sp.dot(gamma.conj().T,holeD)\n myUp = sp.dot(gamma.conj().T,holeU)\n\n if ( conf['toMinimize'] == 1) :\n myCons = sp.linalg.norm(myUp)**2\n elif ( conf['toMinimize'] == 0) :\n myCons = sp.linalg.norm(myDown)**2\n else : \n myCons = sp.linalg.norm(myDown)**2+sp.linalg.norm(myUp)**2\n\n \n return myCons\n##########################################################################################\n\n \n##########################################################################################\ndef chiSquaredGradient (x):\n \"\"\" evaluates chiSquared measure sum_k |B_k(T1)-B_k(T2)|^2 of the spin ensemble \"\"\"\n global dim,conf,fun\n \n gamma = sp.zeros([dim['total']],complex)\n gamma[:dim['alpha']] = getAlpha(x)\n gamma[-3] = 1.0+0.0j \n gamma[-2] = 1.0+0.0j\n gamma[-1] = getBeta(x) \n\n if ( conf['toMinimize'] == 1) :\n myFunc = fun['mtrxBeta_up']\n elif ( conf['toMinimize'] == 0) :\n myFunc = fun['mtrxBeta_down']\n else : \n myFunc = fun['mtrxBeta_up'] + fun['mtrxBeta_down']\n \n complxGradient = sp.dot(myFunc,gamma)\n \n if (conf['useBeta']):\n functGradient = sp.zeros([dim['alpha']*2+2])\n functGradient[ -2] = 2.0*complxGradient[-1].real\n functGradient[ -1] = 2.0*complxGradient[-1].imag\n else:\n functGradient = sp.zeros([dim['alpha']*2])\n\n functGradient[ :2*dim['alpha']:2] = 2.0*complxGradient[:dim['alpha']].real\n functGradient[1:2*dim['alpha']:2] = 2.0*complxGradient[:dim['alpha']].imag\n\n\n return functGradient\n\ndef monitor(x):\n global iteration, conf\n if conf['silent'] == 0 :\n print (\" Iteration: {0:6d}\\r\".format(iteration)),\n iteration+=1\n##########################################################################################\n\n\n##########################################################################################\n### actual functional evaluation by initial alpha0 and beta0 #############################\n##########################################################################################\n\n##########################################################################################\ndef evaluateFunctional (alpha0,beta0):\n global conf,cons,iteration\n if ( conf['toMinimize'] == 1) :\n minContraints= ( {'type' : 'ineq', 'fun' : constraintNormAlpha },\n {'type' : 'ineq', 'fun' : constraintBetaLow },\n {'type' : 'ineq', 'fun' : constraintBetaTop },\n {'type' : 'eq', 'fun' : constraintCavityT2_up },\n {'type' : 'eq', 'fun' : constraintFidelity_up },\n# {'type' : 'eq', 'fun' : evaluateMemOlap },\n )\n elif ( conf['toMinimize'] == 0) :\n minContraints= ( {'type' : 'ineq', 'fun' : constraintNormAlpha },\n {'type' : 'ineq', 'fun' : constraintBetaLow },\n {'type' : 'ineq', 'fun' : constraintBetaTop },\n {'type' : 'eq', 'fun' : constraintCavityT2_down },\n {'type' : 'eq', 'fun' : constraintFidelity_down },\n# {'type' : 'eq', 'fun' : evaluateMemOlap },\n )\n else : \n minContraints= ( \n {'type' : 'ineq', 'fun' : constraintNormAlpha },\n {'type' : 'ineq', 'fun' : constraintBetaLow },\n {'type' : 'ineq', 'fun' : constraintBetaTop },\n {'type' : 'eq', 'fun' : 
constraintCavityT2_down },\n {'type' : 'eq', 'fun' : constraintCavityT2_up },\n {'type' : 'eq', 'fun' : constraintFidelity_down },\n {'type' : 'eq', 'fun' : constraintFidelity_up },\n# {'type' : 'eq', 'fun' : evaluateMemOlap },\n# {'type' : 'eq', 'fun' : chiSquared },\n# {'type' : 'eq', 'fun' : constraintHoleValue },\n# {'type' : 'eq', 'fun' : constraintHoleSlope },\n# {'type' : 'eq', 'fun' : constraintHoleCurv },\n )\n\n x0=getX(alpha0,beta0)\n\n iteration=1\n \n if (conf['silent']==0):\n disp=True\n else:\n disp=False\n\n if (disp):\n print (\" minimize chi-squared measure sum_sigma (sum_k |B^sigma_k(T1)-B^sigma_k(T2)|^2) with (method=SLSQP):\")\n print (\"\") \n\n res=minimize(functionalMemOlapChi2, #functionalFidelity, #chiSquared, #\n x0, \n method='SLSQP',\n# jac=chiSquaredGradient,\n constraints=minContraints,\n tol=cons['chi2_Tol'],\n options={'maxiter' : 40000, 'disp' : disp},\n callback=monitor\n )\n\n myRaw=dict()\n myRaw[conf['success']] =res.success\n myRaw[conf['alpha']] =getAlpha(res.x)\n myRaw[conf['beta']] =getBeta(res.x)\n myRaw[conf['memolap']] =evaluateMemOlap(res.x)\n myRaw[conf['fidelity_up']] =evaluateFidelity(cons['fidelity_up'],res.x,dim['alpha'])\n myRaw[conf['fidelity_down']]=evaluateFidelity(cons['fidelity_down'],res.x,dim['alpha'])\n myRaw[conf['funval']] =res.fun\n\n myFun =fitnessRawFunctional(myRaw)\n\n if (disp):\n print (\"\") \n print (\" optimized alpha (norm) = \" +str(myFun[conf['alpha']]))\n print (\" optimized beta = \" +str(myRaw[conf['beta']]))\n print (\" optimized fidelity up = \" +str(myFun[conf['fidelity_up']]))\n print (\" optimized fidelity down = \" +str(myFun[conf['fidelity_down']]))\n print (\" optimized memOlap = \" +str(myFun[conf['memolap']]))\n print (\" optimized fitness = \" +str(myFun[conf['fitness']]))\n# print (\" optimized hole value = \" +str(constraintHoleValue(res.x)))\n# print (\" optimized hole slope = \" +str(constraintHoleSlope(res.x)))\n# print (\" optimized hole curvature = \" +str(constraintHoleCurv (res.x)))\n\n return myRaw,myFun\n##########################################################################################\n\n\n##########################################################################################\n### fitness calculations #################################################################\n##########################################################################################\n\n##########################################################################################\ndef fitnessRawFunctional ( myRawfun ):\n global conf\n\n returnFun = sp.zeros([conf['entries']])\n returnFun[conf['success']] = myRawfun[conf['success']]\n returnFun[conf['beta']] = sp.absolute(myRawfun[conf['beta']])\n returnFun[conf['alpha']] = la.norm(myRawfun[conf['alpha']])\n returnFun[conf['memolap']] = myRawfun[conf['memolap']]\n returnFun[conf['fidelity_up']] = myRawfun[conf['fidelity_up']]\n returnFun[conf['fidelity_down']] = myRawfun[conf['fidelity_down']]\n returnFun[conf['funval']] = myRawfun[conf['funval']] \n\n returnFun[conf['fitness']] = fitnessFunction(returnFun)\n myRawfun [conf['fitness']] = returnFun[conf['fitness']] \n\n return returnFun\n##########################################################################################\n\n\n##########################################################################################\ndef fitnessFunction ( myFun ): \n global conf\n\n myMutationRate=conf['FITNESS']['{mutationrate}']\n myFitness=sp.zeros([len(myMutationRate),3])\n myFitness[:,0]=myMutationRate[:]\n\n 
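# NOTE (editor, hedged sketch): each block below gates one criterion into a 0/1\n    # fitness flag plus a relative deviation; mutate() then softens hard failures by\n    # the mutation rate, e.g. fitness=0, mutation=0.3 -> min(0+(1-0)*0.3,1)=0.3.\n    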
########################################################################################\n ### functional fitness function ########################################################\n funV = myFun[conf['funval']]\n funTop = float(conf['FITNESS']['{lim_functional}'])\n\n myFitness[conf['funval'],1] = 1.0\n ### if out of range ...\n if funTop < funV :\n myFitness[conf['funval'],1] = 0.0\n myFitness[conf['funval'],2] = (funV-funTop)/funTop\n ### functional fitness function ########################################################\n ########################################################################################\n\n ########################################################################################\n ### fidelity fitness function ##########################################################\n fidTop = float(conf['FITNESS']['{lim_fidelity_top}'])\n fidLow = float(conf['FITNESS']['{lim_fidelity_low}'])\n\n fidU = myFun[conf['fidelity_up']]\n myFitness[conf['fidelity_up'],1] = 1.0\n ### if out of range ...\n if fidU < fidLow :\n myFitness[conf['fidelity_up'],1] = 0.0\n myFitness[conf['fidelity_up'],2] = (fidLow-fidU)/fidLow\n elif fidTop < fidU :\n myFitness[conf['fidelity_up'],1] = 0.0\n myFitness[conf['fidelity_up'],2] = (fidU-fidTop)/fidTop\n\n fidD = myFun[conf['fidelity_down']]\n myFitness[conf['fidelity_down'],1] = 1.0\n ### if out of range ...\n if fidD < fidLow :\n myFitness[conf['fidelity_down'],1] = 0.0\n myFitness[conf['fidelity_down'],2] = (fidLow-fidD)/fidLow\n elif fidTop < fidD :\n myFitness[conf['fidelity_down'],1] = 0.0\n myFitness[conf['fidelity_down'],2] = (fidD-fidTop)/fidTop\n ### fidelity fitness function ##########################################################\n ########################################################################################\n\n\n ########################################################################################\n ### overlap fitness function ###########################################################\n memOlapTop = float(conf['FITNESS']['{lim_memory_overlap}'])\n\n memOlap = myFun[conf['memolap']]\n myFitness[conf['memolap'],1] = 1.0\n ### if out of range ...\n if memOlapTop < memOlap :\n myFitness[conf['memolap'],1] = 0.0\n myFitness[conf['memolap'],2] = (memOlap-memOlapTop)/memOlapTop\n ### overlap fitness function ###########################################################\n ########################################################################################\n\n ########################################################################################\n ### coefficient fitness function #######################################################\n alphaTop = float(conf['FITNESS']['{var_storage_amplitude}'])\n alphaTop += float(conf['MEConstraints']['{storage_amplitude}'])\n\n cAlpha = myFun[conf['alpha']]\n myFitness[conf['alpha'],1] = 1.0\n ### if out of range ...\n if alphaTop < cAlpha :\n myFitness[conf['alpha'],1] = 0.0\n myFitness[conf['alpha'],2] = (cAlpha-alphaTop)/alphaTop\n\n betaTop = float(conf['FITNESS']['{var_beta}'])\n betaLow = float(conf['MEConstraints']['{limit_low_beta}'])-betaTop\n betaTop = float(conf['MEConstraints']['{limit_top_beta}'])+betaTop\n\n cBeta = myFun[conf['beta']]\n myFitness[conf['beta'],1] = 1.0\n ### if out of range ...\n if cBeta < betaLow :\n myFitness[conf['beta'],1] = 0.0\n myFitness[conf['beta'],2] = (betaLow-cBeta)/betaLow\n if betaTop < cBeta :\n myFitness[conf['beta'],1] = 0.0\n myFitness[conf['beta'],2] = (cBeta-betaTop)/betaTop\n ### coefficient fitness function 
#######################################################\n ########################################################################################\n\n ########################################################################################\n ### successrate fitness function #######################################################\n myFitness[conf['success'],1] = myFun[conf['success']]\n myFitness[conf['success'],2] = 0.0\n ### successrate fitness function #######################################################\n ########################################################################################\n\n ########################################################################################\n ### evaluate fitness function ##########################################################\n fitVal = 1.0\n\n fitVal *= mutate(myFitness[conf['funval'],:])\n\n if ( conf['toMinimize'] == 1) :\n fitVal *= mutate(myFitness[conf['fidelity_up'],:])\n elif ( conf['toMinimize'] == 0) :\n fitVal *= mutate(myFitness[conf['fidelity_down'],:])\n else : \n fitVal *= mutate(myFitness[conf['fidelity_up'],:])\n fitVal *= mutate(myFitness[conf['fidelity_down'],:])\n\n fitVal *= mutate(myFitness[conf['memolap'],:])\n fitVal *= mutate(myFitness[conf['alpha'],:])\n fitVal *= mutate(myFitness[conf['beta'],:])\n fitVal *= mutate(myFitness[conf['success'],:])\n\n\n ### evaluate fitness function ##########################################################\n ########################################################################################\n\n return fitVal\n##########################################################################################\n\n\n##########################################################################################\ndef mutate(myFitness):\n mutation = myFitness[0]\n fitness = myFitness[1]\n deviation = myFitness[2]\n\n\n # plot 1/(1+ exp(-(2.0*{0,1}-1.0)/x+0.00)) from 0 to 1\n if (mutation == 0.0):\n return fitness\n else:\n return min (fitness+(1.0-fitness)*mutation,1.0)\n\n # TODO ... 
weight deviation\n##########################################################################################\n\n\n##########################################################################################\n##########################################################################################\n##########################################################################################\n\n" }, { "alpha_fraction": 0.7528089880943298, "alphanum_fraction": 0.8202247023582458, "avg_line_length": 88, "blob_id": "02bbfd38cdcf0222afa665c30c3d7267f5db000a", "content_id": "293259bfae6ecd14c055f7dcece79abfcdc09016", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 89, "license_type": "no_license", "max_line_length": 88, "num_lines": 1, "path": "/scripts/gfort-complie.sh", "repo_name": "bhartl/optimal-control", "src_encoding": "UTF-8", "text": "gfortran ./srcNv/modNvCenter.F95 ./srcOptCntrl/modSmallestOverlap.F95 ./srcMain/main.F95\n" }, { "alpha_fraction": 0.7338129281997681, "alphanum_fraction": 0.7733812928199768, "avg_line_length": 54.599998474121094, "blob_id": "cd65ed4043fe854e8f6685180c006f7a507dab08", "content_id": "5dca9c386673581301309372ef8f45f0958273ce", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 278, "license_type": "no_license", "max_line_length": 203, "num_lines": 5, "path": "/scripts/run-smallest-overlap.sh", "repo_name": "bhartl/optimal-control", "src_encoding": "UTF-8", "text": "#!/bin/bash\nrm ./smallOverlap.out\nifort -heap-arrays ./srcRand/MersenneTwister.F90 -Tf ./srcNv/modNvCenter.F95 -Tf ./srcOptCntrl/modSmallestOverlap.F95 -Tf ./srcMain/main.F95 -free /opt/NAG/fll6i25dcl/lib/libnag_nag.a -o smallOverlap.out\n./smallOverlap.out\ngnuplot ./myPlot.gnu\n" }, { "alpha_fraction": 0.5509161949157715, "alphanum_fraction": 0.5809552669525146, "avg_line_length": 35.75690460205078, "blob_id": "a2ffb2a9c37677a4b973e04cb75a3e8f2e676a3d", "content_id": "f071bee264fc53417ac3c7cdc9d1a1f231d8f8a1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6658, "license_type": "no_license", "max_line_length": 138, "num_lines": 181, "path": "/python/SmallestOverlapRealBit.py~", "repo_name": "bhartl/optimal-control", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python2.7\n#-*- coding:utf-8 -*-\nimport argh\nimport os, re, sys\nimport ConfigParser as cp\nimport scipy as sp\nfrom scipy.integrate import cumtrapz\nfrom math import acos,asin\n\nfrom mpl_toolkits.mplot3d import axes3d\nfrom matplotlib import cm\nfrom matplotlib.ticker import LinearLocator, FormatStrFormatter\nimport matplotlib.pyplot as plt\nimport matplotlib.colors as mcolors\n\nimport IOHelper\n\n\n##########################################################################################\n### main routine #########################################################################\n\n\ndef make_colormap(seq):\n \"\"\"Return a LinearSegmentedColormap\n seq: a sequence of floats and RGB-tuples. 
The floats should be increasing\n and in the interval (0,1).\n \"\"\"\n seq = [(None,) * 3, 0.0] + list(seq) + [1.0, (None,) * 3]\n cdict = {'red': [], 'green': [], 'blue': []}\n for i, item in enumerate(seq):\n if isinstance(item, float):\n r1, g1, b1 = seq[i - 1]\n r2, g2, b2 = seq[i + 1]\n cdict['red'].append([item, r1, r2])\n cdict['green'].append([item, g1, g2])\n cdict['blue'].append([item, b1, b2])\n return mcolors.LinearSegmentedColormap('CustomMap', cdict)\n\n### check for arguments, g: generate data, r: read data, in both ways: generate matrices ###\ndef main_routine (wd=\"./\",cfg=\"./python/parameter.cfg\",gammaCnt=11,bloch=0,myMap=\"custom\",project=0):\n\n ### read config file ###\n print (\"load from config file: \" + cfg)\n\n configParser = cp.ConfigParser()\n configParser.read(cfg)\n print (configParser.sections())\n cfg=configParser.__dict__['_sections'].copy() \n\n #for src, target in cfg['NVSETUP'].items():\n # print(src + \" : \" + target)\n omega_c = float(cfg['NVSETUP']['{omega_c}'])\n\n nWrite=int(cfg['OCFourier']['{write_harmonic}'])\n nRead =int(cfg['OCFourier']['{read_harmonic}'])\n nStore=int(cfg['MEFourier']['{storage_harmonic}'])\n\n nDown =nRead+nWrite\n nUp =nDown+nWrite\n ### read config file ###\n\n ### read data ### \n cavityWrite,cavityMemo,cavityRead =IOHelper.harmonics_readwrite(**cfg)\n time =IOHelper.functionaltimes_readwrite(**cfg)\n\n time['write'][:] *= 1e9\n time['read'][:] *= 1e9\n ti = int(time['idx_ti'])\n tf = int(time['idx_tf'])\n functime = time['read'][ti:tf] \n dt = float(time['delta_t'])\n\n filename =IOHelper.getVectorOverlap(**cfg)\n reGamma,imGamma=sp.loadtxt(filename).T \n alphaR =reGamma[0:nRead] -1j*imGamma[0:nRead]\n alphaD =reGamma[nRead:nDown]-1j*imGamma[nRead:nDown]\n alphaU =reGamma[nDown:nUp] -1j*imGamma[nDown:nUp]\n ### read data ###\n\n ### plotting\n Reg1Up = sp.dot(alphaU.conj(),cavityWrite)\n Reg1Down = sp.dot(alphaD.conj(),cavityWrite)\n\n Reg2Down = sp.dot(alphaD.conj(),cavityMemo)\n Reg2Up = sp.dot(alphaU.conj(),cavityMemo)\n Reg2Read = sp.dot(alphaR.conj(),cavityRead)\n\n Reg2DownRead = Reg2Down + Reg2Read\n Reg2UpRead = Reg2Up + Reg2Read\n\n myDensity = sp.zeros([functime.size,gammaCnt])\n FuncCavity = sp.zeros([functime.size,gammaCnt],complex)\n spos = sp.zeros([2,gammaCnt])\n FuncInfoOlap = sp.zeros([2,gammaCnt],complex)\n\n I00 = cumtrapz( sp.real(Reg2DownRead[ti:tf] * Reg2DownRead[ti:tf].conj()), x=None, dx=dt )[-1]\n I11 = cumtrapz( sp.real(Reg2UpRead[ti:tf] * Reg2UpRead[ti:tf].conj()), x=None, dx=dt )[-1]\n\n for g in sp.arange(0.0,gammaCnt):\n gamma = g/(gammaCnt-1.0) \n\n myDensity[:,g]=1.0-gamma\n \n spos[0,g] = gamma\n spos[1,g] = 1.0-gamma\n FuncCavity[:,g] = spos[0,g]*Reg2Down[ti:tf] + spos[1,g]*Reg2Up[ti:tf]\n FuncCavity[:,g] += Reg2Read [ti:tf]\n\n FuncInfoOlap [0,g] = cumtrapz( (FuncCavity[:,g] * Reg2DownRead[ti:tf].conj()).real, x=None, dx=dt )[-1]\n FuncInfoOlap [0,g] += 1j*cumtrapz( (FuncCavity[:,g] * Reg2DownRead[ti:tf].conj()).imag, x=None, dx=dt )[-1]\n FuncInfoOlap [0,g] /= I00 \n FuncInfoOlap [0,g] = sp.absolute(FuncInfoOlap [0,g]) \n\n FuncInfoOlap [1,g] = cumtrapz( (FuncCavity[:,g] * Reg2UpRead[ti:tf].conj()).real, x=None, dx=dt )[-1]\n FuncInfoOlap [1,g] += 1j*cumtrapz( (FuncCavity[:,g] * Reg2UpRead[ti:tf].conj()).imag, x=None, dx=dt )[-1]\n FuncInfoOlap [1,g] /= I11 \n FuncInfoOlap [1,g] = sp.absolute(FuncInfoOlap [1,g]) \n\n# FuncInfoOlap [0,g] = cumtrapz( sp.absolute(FuncCavity[:,g]) * sp.absolute(Reg2DownRead[ti:tf].conj()), x=None, dx=dt )[-1]\n# FuncInfoOlap [0,g] /= I00 \n\n# 
FuncInfoOlap [1,g] = cumtrapz( sp.absolute(FuncCavity[:,g]) * sp.absolute(Reg2UpRead[ti:tf].conj()), x=None, dx=dt )[-1]\n# FuncInfoOlap [1,g] /= I11 \n\n fig = plt.figure()\n fs = 22\n label_size = 20\n plt.rcParams['xtick.labelsize'] = label_size \n plt.rcParams['ytick.labelsize'] = label_size \n\n xx, yy = sp.meshgrid(sp.linspace(0.0,1.0,gammaCnt),functime)\n\n zmax = (sp.absolute(FuncCavity)**2).max()\n\n c = mcolors.ColorConverter().to_rgb\n \n if myMap ==\"custom\" :\n cm = make_colormap(\n [c('blue'), c('purple'), c('red')])\n else:\n cm = plt.cm.get_cmap(myMap) \n \n myColors = cm(myDensity)\n\n fig1 = fig.add_subplot(111, projection='3d')\n fig1.plot(spos[0,:], FuncInfoOlap [0,:].real, zs=functime[ project], zdir='y',lw=2.5, color=\"blue\",label=\"$\\gamma^{\\prime}$\",zorder=0.1)\n fig1.plot(spos[0,:], FuncInfoOlap [1,:].real, zs=functime[ project], zdir='y',lw=2.5, color=\"red\" ,label=\"$\\delta^{\\prime}$\",zorder=0.1)\n\n fig1.plot_surface(xx, yy, sp.absolute(FuncCavity)**2/zmax,rstride=1, cstride=1,alpha=1,facecolors=myColors, antialiased=False)\n fig1.plot_wireframe(xx, yy, sp.absolute(FuncCavity)**2/zmax, rstride=15, cstride=3,alpha=1,linewidth=1,color=\"black\")\n\n\n fig1.legend(fontsize=fs)\n fig1.set_zlim(0,1.1)\n fig1.set_ylim(functime[0],functime[-1])\n fig1.set_xticks([0,0.5,1])\n fig1.set_yticks([55,77.5,100])\n fig1.set_zticks([0,0.5,1])\n fig1.set_xlim(0,1)\n fig1.set_zlabel(\"$|A(t)|^2$\",fontsize=fs)\n fig1.set_ylabel(\"$t$ in ns\",fontsize=fs)\n fig1.set_xlabel(\"$\\gamma$\",fontsize=fs)\n fig1.xaxis._axinfo['label']['space_factor'] = 2.0\n fig1.yaxis._axinfo['label']['space_factor'] = 2.0\n fig1.zaxis._axinfo['label']['space_factor'] = 2.0\n\n\n# m.set_array(myDensity)\n# plt.colorbar(m,shrink=0.5,aspect=7) \n \n plt.show()\n\n ### plotting\n\n\n### main routine #########################################################################\n##########################################################################################\n\n\nif __name__ == '__main__':\n argh.dispatch_command(main_routine) \n\n\n\n" }, { "alpha_fraction": 0.5706111192703247, "alphanum_fraction": 0.5935786366462708, "avg_line_length": 39.44694519042969, "blob_id": "7bd31fb3200f2c5669f78611a8a6797e770d038c", "content_id": "02145e7c66f318ac9873d9ef8a44da2b53e9855b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 12583, "license_type": "no_license", "max_line_length": 206, "num_lines": 311, "path": "/python/MemoryTimeVariation.py", "repo_name": "bhartl/optimal-control", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python2.7\n#-*- coding:utf-8 -*-\n\nimport ConfigParser as cp\nfrom subprocess import Popen, PIPE\nimport multiprocessing\nimport scipy as sp\nimport os\nimport shutil\nimport glob\n\nfrom mpl_toolkits.mplot3d import axes3d\nfrom matplotlib import cm\nfrom matplotlib.ticker import LinearLocator, FormatStrFormatter\nimport matplotlib.pyplot as plt\n#import matplotlib.pylab as plt\n#from matplotlib import rc\n\nimport argh\nimport IOHelper\nfrom MemoryPulseFunctional import conf\n\ndef run_single_job(harmonicBase=None,storageTime=None,cntBase=None,datPath=\"./dat/\",prefix=\"My_\",destPath=\"../parallel/\",generationType=\"r\",useBeta=0,cfg='./python/parameter.cfg'):\n \"\"\" \"\"\"\n #define new working directories + dependencies\n\n base=float(harmonicBase)\n\n configPath=cfg\n print (\"load from config file: \" + configPath)\n configParser = cp.ConfigParser()\n configParser.read(configPath)\n 
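# NOTE (editor, hedged): ConfigParser stores parsed data in the private attribute\n    # _sections; copying it gives a plain dict of dicts, so options are read below as\n    # cfg['SECTION']['{key}'] instead of via configParser.get().\n    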
cfg=configParser.__dict__['_sections'].copy() \n\n    newDir=dest(destPath,base,storageTime[0])\n\n#    newDir=destPath+\"interval_{:07.3f}w_{:07.3f}r_base/\".format(writeBase,readBase)\n\n    print (\"create folder dependencies:\" + newDir)\n    if not os.path.exists(newDir):\n        os.makedirs(newDir)\n    if not os.path.exists(newDir+\"dat\"):\n        os.makedirs(newDir+\"dat\")\n\n    for data in glob.glob(datPath):\n        if os.path.exists(newDir+data): \n            shutil.rmtree(newDir+data) \n        shutil.copytree(data, newDir+data)\n\n    for srcDir in glob.glob(\"./src*\"):\n        if os.path.exists(newDir+srcDir): \n            shutil.rmtree(newDir+srcDir) \n        shutil.copytree(srcDir, newDir+srcDir)\n\n\n##    shutil.copy(\"./python/test.sh\",newDir+\"test.sh\")\n\n    #define config file parameters\n    # TODO {t2_start}         = Pi/harmonicBase/{harmonic_base}D0*storageTime\n    #      {timecnt_harmonic} =                                 *timeStorage\n    configParser.set('MEFourier','{storage_base}'   ,\"{0:.3f}D0\".format(base))\n    configParser.set('METime'   ,'{storage_time}'   ,\"Pi/storeBase/{0:.3f}D0*{1:.3f}D0\".format(base,storageTime[0]))\n    configParser.set('METime'   ,'{storage_timecnt}',\"{0:}\".format(int(cntBase*storageTime[0])))\n    #                            =                                 *timeStorage\n    configParser.set('FILES','{prefix}',newDir+datPath+prefix)\n\n\n    #write config file\n    with open(newDir+\"parameter.cfg\", 'wb') as configfile:\n        configParser.write(configfile)\n\n    cmd=\"./python/MemoryPulseEvaluate.py --silent=\" + \"1\" + \\\n        \" -u=\" + str(useBeta) + \\\n        \" -v=\" + \"50\" + \\\n        \" -w=\" + newDir + \\\n        \" --cfg=\" + newDir + \"parameter.cfg\" + \\\n        \" -g=\" + generationType \n    print (cmd)\n    log=newDir+\"evaluate.log\"\n    optimize = Popen(cmd.split(),stdout=open(log, 'wb'))\n    optimize.wait()\n\n    if (newDir != \"./\" or newDir != \"\"):\n        for srcDir in glob.glob(newDir+\"src*\"):\n            if os.path.exists(srcDir): \n                shutil.rmtree(srcDir) \n\ndef dest(destPath,harmonicBase,storageTime):\n    return destPath+\"base{0:07.3f}_TStore{1:07.3f}/\".format(float(harmonicBase),storageTime)\n\n\ndef main_routine (harmonicBase=10.0,memoTime=2.0,memoCnt=1,cntBase=400,datPath=\"./dat/\",prefix=\"My_\",destPath=\"../parallel/\",gentype=\"c\",pltId=\"funval\",projection=-1,useBeta=0,cfg='./python/parameter.cfg'):\n    \"\"\"\n    Parameters:\n    -----------\n    --harmonicBase: fundamental frequency for storage pulse in fractions of Pi/{base_rabi}/harmonicBase.\n                    (float)\n    --memoTime:     maximum value of storage time interval in multiples of Pi/{base_rabi}.\n                    (float)\n    --memoCnt:      number of storage intervals from 1.0 to memoTime times Pi/{base_rabi}.\n                    (int)\n    --cntBase:      discretisation in time of shortest reading pulse, will be adapted by the size of the storage window.\n                    (int)\n    --datPath:      path to optimized src data, evaluated with evaluateMinimum.py\n                    (string)\n    --prefix:       prefix of files after datPath\n                    (string)\n    --destPath:     directory where output files are generated (for every parameter setting of\n                    harmonicBase, storageTime a corresponding subdirectory is created)\n                    (string)\n    --gentype :     type of data generation -->\n                    g : generate data with modNv\n                    r : read data with modNv and \n                    c : collect data and produce 3d plot of reading area\n    --pltId:        name of fitness parameter -->\n                    'funval','fidelity_up','fidelity_down','success','memolap','fitness','alpha','beta'\n                    (string)\n    \"\"\"\n    dMemo = sp.array(range(0,memoCnt))\n\n    if (memoCnt == 1):\n        memo_times = (1.0+(memoTime-1.0)*dMemo[:])\n    else:\n        memo_times = (1.0+(memoTime-1.0)*dMemo[:]/float(memoCnt-1))\n\n#    print (dMemo)\n#    print (memo_times)\n\n    if (gentype==\"g\" or gentype==\"r\"): # generate\n\n#        
run_single_job(harmonicBase=None,storageTime=None,cntBase=None,destPath=\"./parallel/\",generationType=\"r\")\n        pool = multiprocessing.Pool(processes=multiprocessing.cpu_count())\n        results = [ pool.apply_async(run_single_job, args=(harmonicBase,mT,cntBase,datPath,prefix,destPath,gentype,useBeta,cfg))\n                    for mT in zip(memo_times) ]\n\n        results = [ p.get() for p in results ]\n        pool.terminate()\n\n    elif (gentype==\"c\"): # collect\n\n        configPath=cfg\n        print (\"load from config file: \" + configPath)\n        configParser = cp.ConfigParser()\n        configParser.read(configPath)\n        cfg=configParser.__dict__['_sections'].copy()\n        \n        nHarmonic       = int(cfg['MEFourier']['{storage_harmonic}'])\n        timeCntHarmonic = int(cntBase*memoTime)\n        timeCntCntrl    = int(cfg['METime']['{storage_timecnt}'])\n        timeCntRead     = int(cfg['OCTime']['{read_timecnt}'])\n        timeCnt         = timeCntHarmonic+timeCntCntrl\n\n        filelist=[]\n#        div     =10.0\n##        readCnt = 17\n\n        omega_c=float(cfg['NVSETUP']['{omega_c}'])*2.0*sp.pi\n        omega_r=float(cfg['OCFourier']['{base_rabi}'])/1e6\n        tUnit  =sp.pi/(2.0*sp.pi*1e3*omega_r)\n        newDir=dest(destPath,harmonicBase,memo_times.max())\n        cfg['FILES']['{prefix}']=datPath+prefix\n        functimeCfg=IOHelper.functionaltimes_readwrite(**cfg)\n        ti   = int(functimeCfg['idx_ti'])\n        tf   = int(functimeCfg['idx_tf'])\n        ntime = tf-ti\n\n\n        name_optimized=cfg['FILES']['{name_optimized}']\n        name_readwrite=IOHelper.getNameReadWrite(**cfg) \n        name_storage  =IOHelper.getNameStorage(**cfg) \n\n        cavity=sp.zeros((2,memoCnt,timeCntRead))\n        fitness=sp.ones((memoCnt,conf['entries']))*(-1.0)\n        functime=sp.zeros((2,memoCnt))\n        time    =sp.zeros(timeCnt)\n\n#        wb=write_base[-1] # choose last element\n        errCnt=[]\n\n        for mc in range(memoCnt):\n            timeCntStorage=cntBase*(memo_times[mc])\n            ti_mc = ti + timeCntStorage\n            tf_mc = tf + timeCntStorage\n            functime[0,mc] = ti_mc\n            functime[1,mc] = tf_mc\n\n            timeCntCurrent=timeCntStorage+timeCntCntrl\n\n            newDir=dest(destPath,harmonicBase,memo_times[mc])\n            cfg['FILES']['{prefix}'] = newDir+datPath+prefix\n\n            try:\n                filename = IOHelper.getFitnessMemory(**cfg)\n                print (filename)\n                fitness[mc,:] = sp.loadtxt(filename)\n\n                cfg['FILES']['{prefix}'] = newDir+datPath+prefix+name_readwrite+name_storage+name_optimized\n\n                filename=cfg['FILES']['{prefix}']+\"cavityMode_down\"+cfg['FILES']['{postfix}']\n                print (filename)\n                time,real,imag = sp.loadtxt(filename).T\n                cavity[0,mc,:ntime] = real[ti_mc:tf_mc:]**2 + imag[ti_mc:tf_mc:]**2\n\n                filename=cfg['FILES']['{prefix}']+\"cavityMode_up\"+cfg['FILES']['{postfix}']\n                print (filename)\n                time,real,imag = sp.loadtxt(filename).T\n                cavity[1,mc,:ntime] = real[ti_mc:tf_mc:]**2 + imag[ti_mc:tf_mc:]**2\n\n            except:\n                errCnt.append(\"can't read files <\"+newDir+\">...\\n\")\n\n        if (len(errCnt) != 0):\n            for err in errCnt:\n                print str(err).strip()\n\n        print (\"generating color map\")\n\n        imin=functime[0,0]\n        imax=functime[1,0]\n        tmin=0.0\n        tmax=time[imax]/tUnit-time[imin]/tUnit\n\n#        functimeX      =sp.array([1.0,memoTime])\n#        functimeYStart =sp.array([time[functime[0,0]]/tUnit,\\\n#                                  time[functime[0,-1]]/tUnit])\n#        functimeYStop  =sp.array([time[functime[1,0]]/tUnit,\\\n#                                  time[functime[1,-1]]/tUnit])\n#        cutShortX   =sp.array([1.0,1.0])\n#        cutLargeX   =sp.array([readBase,readBase])\n#        cutShortY   =sp.array([writeBase,writeBase+readBase/2.0])\n#        cutLargeY   =sp.array([writeBase,writeBase+readBase])\n#        functimeZ   =sp.array([-1.5,-1.5])\n#        functimeYMid=sp.array([ti + (tf-ti)/4.0,ti+ (tf-ti)/2.0])\n#        # read functional times t2, t3\n\n\n\n        # define the grid over which the function should be plotted (xx and yy are matrices)\n        xx, 
yy = sp.meshgrid(sp.linspace(1.0,memoTime,memoCnt),\n sp.linspace(tmin, tmax, ntime))\n \n zz0 = cavity[0,:,:ntime].T/(cavity[:,:,:ntime].max())\n zz1 = cavity[1,:,:ntime].T/(cavity[:,:,:ntime].max())\n\n font = {\n 'fontsize' : 26,\n 'verticalalignment' : 'top',\n 'horizontalalignment' : 'center'\n }\n\n\n\n fitmax=max(fitness[:,conf[pltId]])\n minProjection=0\n minfit=1.1\n for mc in range(memoCnt):\n if (fitness[mc,conf[pltId]]==-1.0):\n fitness[mc,conf[pltId]]=1.1*fitmax\n fitness[mc,conf[pltId]]=fitness[mc,conf[pltId]]/fitmax\n if (projection == -1 and minfit > fitness[mc,conf[pltId]]):\n minfit=fitness[mc,conf[pltId]]\n minProjection=mc\n \n if (projection != -1):\n minProjection = projection\n\n \n fig = plt.figure()\n fig0 = fig.add_subplot(121, projection='3d')\n fig0.plot_surface(xx, yy, zz0, rstride=10, cstride=1, cmap=cm.Blues, alpha=0.5,zorder=11.0,vmin=-0.25, vmax=1)\n fig0.contourf(xx, yy, zz0, zdir='z', offset=-1.5, cmap=cm.Blues,vmin=-0.25, vmax=1,zorder=1.0)\n fig0.plot(memo_times[:], fitness[:,conf[pltId]], zs=tmax, zdir='y',lw=1.5, color=\"green\")\n\n fig0.plot(sp.linspace(tmin, tmax, ntime), cavity[0,minProjection,:ntime].T/(cavity[:,:,:ntime].max()), zs=memo_times[minProjection], zdir='x',lw=2.5, color=\"blue\",zorder=10.0)\n fig0.plot(sp.linspace(tmin, tmax, ntime), cavity[0,minProjection,:ntime].T/(cavity[:,:,:ntime].max())-1.5, zs=memo_times[minProjection], zdir='x',lw=1.5, color=\"blue\",zorder=10.0)\n fig0.plot(sp.linspace(tmin, tmax, ntime), cavity[1,minProjection,:ntime].T/(cavity[:,:,:ntime].max())-1.5, zs=memo_times[minProjection], zdir='x',lw=1.5, color=\"red\",zorder=10.0)\n\n fig0.set_title(\"a) state \\\"0\\\"\", **font)\n fig0.set_xlabel(\"$\\Delta T_{\\cal S} \\, / \\, \\\\frac{T_R}{2}$\", **font)\n fig0.set_ylabel(\"$t \\, / \\, \\\\frac{T_R}{2}$\", **font)\n fig0.set_zlabel(\"$\\left|A(t)\\\\right|^2 \\, / \\, \\left|A_{max}(t)\\\\right|^2$\", **font)\n fig0.set_zlim(-1.5, 1.1)\n fig0.set_xlim(1.0,memoTime)\n fig0.set_ylim(tmin,tmax)\n\n fig1 = fig.add_subplot(122, projection='3d')\n fig1.plot_surface(xx, yy, zz1, rstride=10, cstride=1, cmap=cm.Reds, alpha=0.5,zorder=11.0,vmin=-0.25, vmax=1)\n fig1.contourf(xx, yy, zz1, zdir='z', offset=-1.5, cmap=cm.Reds,vmin=-0.25, vmax=1,zorder=1.0)\n fig1.plot(memo_times[:], fitness[:,conf[pltId]], zs=tmax, zdir='y',lw=1.5, color=\"green\")\n\n fig1.plot(sp.linspace(tmin, tmax, ntime), cavity[1,minProjection,:ntime].T/(cavity[:,:,:ntime].max()), zs=memo_times[minProjection], zdir='x',lw=2.5, color=\"red\",zorder=10.0)\n fig1.plot(sp.linspace(tmin, tmax, ntime), zz0[:,minProjection]-1.5, zs=memo_times[minProjection], zdir='x',lw=1.5, color=\"blue\",zorder=10.0)\n fig1.plot(sp.linspace(tmin, tmax, ntime), zz1[:,minProjection]-1.5, zs=memo_times[minProjection], zdir='x',lw=1.5, color=\"red\",zorder=10.0)\n\n fig1.set_title(\"b) state \\\"1\\\"\", **font)\n fig1.set_xlabel(\"$\\Delta T_{\\cal S} \\, / \\, \\\\frac{T_R}{2}$\", **font)\n fig1.set_ylabel(\"$t^{(2)}(\\Delta T_{\\cal S}) \\, / \\, T_R$\", **font)\n fig1.set_zlabel(\"$\\left|A(t)\\\\right|^2 \\, / \\, \\left|A_{max}(t)\\\\right|^2$\", **font)\n fig1.set_zlim(-1.5, 1.1)\n fig1.set_xlim(1.0,memoTime)\n fig1.set_ylim(tmin,tmax)\n\n\n plt.show()\n\n else:\n print (\"option unkown: gentype=\"+gentype)\n \n \nif __name__ == '__main__':\n argh.dispatch_command(main_routine) \n\n\n" }, { "alpha_fraction": 0.688622772693634, "alphanum_fraction": 0.7485029697418213, "avg_line_length": 54.66666793823242, "blob_id": "0272f8f9766565a988286d479ca07cded35fc0ec", 
"content_id": "82d1fb738b9605c132bfc9ccf85797065d6a8d19", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 167, "license_type": "no_license", "max_line_length": 153, "num_lines": 3, "path": "/scripts/ifort-zerospinAnalysis.sh", "repo_name": "bhartl/optimal-control", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\nifort -heap-arrays \"$1\"srcRand/MersenneTwister.F90 -Tf \"$1\"srcNv/modNvCenter.F95 -Tf \"$1\"srcMain/main_NVCheck_zeroSpins.F95 -free -o \"$1\"ZeroSpinAnalysis\n" }, { "alpha_fraction": 0.5614035129547119, "alphanum_fraction": 0.6587719321250916, "avg_line_length": 20.49056625366211, "blob_id": "ae6c13307690eddcb35e0730ded7b900c6c2b72d", "content_id": "2b86508d5f2767b94a8147d838741d22499adf83", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 1140, "license_type": "no_license", "max_line_length": 117, "num_lines": 53, "path": "/scripts/spin_state_3d.sh", "repo_name": "bhartl/optimal-control", "src_encoding": "UTF-8", "text": "#!/bin/bash\n# $1: source data\n# $2: output file\n# $3: frequency range: plot from 1-$3 to 1+$3, typically 0.01\n# $4: cbrange\n# $5: t1\n# $6: t2\n# $7: upper time cutoff\n#\n# try for instance:\n# ./scripts/spin_state_3d.sh ./dat/p2_optimized_memory_32h_spin_dynamics.dat memory_4TR.png 720 0.01 0.04 411.2 616.8\nfilename=$1\n\necho \"read data from :\" \"$1\"\necho \"plot png :\" \"$2\"\n\ngnuplot -persist <<PLOT\n\nset terminal pngcairo size 3500,2620 enhanced font 'Helvetica,60'\nset output \"$2\"\n\nset pm3d map\nset pm3d interpolate 4,4\nset palette define (-$4 \"magenta\", 0 \"black\", $4 \"cyan\")\nunset key\n\nset xr[0:$7]\nset xtics 350\nset yr[1-$3:1+$3]\nset cbr[-$4:$4]\nset xlabel \"time in ns\"\nset arrow from $5,1-$3 to $5,1+$3 nohead lt 1 lc rgb \"#bebebe\" lw 3.5 front\nset arrow from $6,1-$3 to $6,1+$3 nohead lt 1 lc rgb \"#bebebe\" lw 3.5 front\n\n\nset multiplot layout 1,2\nset ylabel \"{/Symbol w}_k/{/Symbol w}_c\"\nunset colorbox\nset origin 0.1,0\nset size 0.53,1\nset title \"Re[B_k(t)]\"\nsplot \"${filename}\" u 1:2:3 t \"\"\n\nset colorbox\nunset ylabel \nset origin 0.47,0\nset size 0.53,1\nunset ytics\nset title \"Im[B_k(t)]\"\nsplot \"${filename}\" u 1:2:4 t \"\"\n\nquit\nPLOT\n\n" }, { "alpha_fraction": 0.5273812413215637, "alphanum_fraction": 0.5509834289550781, "avg_line_length": 36.26853561401367, "blob_id": "9c68ab9be499d497ac01351ff98c1b58d5542f8c", "content_id": "5dc98970eff3191ed25f6418534f7853235d4b65", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 22625, "license_type": "no_license", "max_line_length": 145, "num_lines": 607, "path": "/python/MemoryPulseEvaluate_phasespace_parallel.py~", "repo_name": "bhartl/optimal-control", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python2.7\n#-*- coding:utf-8 -*-\nimport argh\nimport os, re, sys\nimport ConfigParser as cp\nimport scipy as sp\nimport scipy.linalg as la\nimport matplotlib.pyplot as plt\nimport multiprocessing\n\nimport IOHelper\nfrom IOHelper import replace_in_file\n\nfrom math import sqrt, sin, cos, pi\nfrom scipy.optimize import minimize\nfrom subprocess import Popen, PIPE, call\nfrom scipy import linspace, outer, ones, sin, cos, size\nfrom scipy.interpolate import interp2d\nfrom mpl_toolkits.mplot3d import Axes3D\n\n\n##########################################################################################\n### globals 
##############################################################################\ncons     =0.0\nfun      =0.0\ndim      =0.0\nconf     =0.0\niteration=0\n### globals ##############################################################################\n##########################################################################################\n\n\n\n\n##########################################################################################\n##########################################################################################\n\ndef constraintNormAlpha (x):\n    \"\"\" norm of reading vector must equal the reading amplitude \"\"\"\n    global cons,dim\n    alpha=x[:2*dim['alpha']:2]+1j*x[1:2*dim['alpha']:2] # extract vector alpha out of variational vector x\n    return cons['alpha_norm']**2 - la.norm(alpha[:])**2\n\ndef constraintBetaLow (x):\n    \"\"\" constraint the lower bound of the length of 'phase' beta, beta_low < |beta| \"\"\"\n    global cons\n    beta=x[-2]+1j*x[-1] # extract phase beta out of variational vector x\n    return sp.absolute(beta)-cons['beta_low']\n\ndef constraintBetaTop (x):\n    \"\"\" constraint the upper bound of the length of 'phase' beta, beta_top > |beta| \"\"\"\n    global cons\n    beta=x[-2]+1j*x[-1] # extract phase beta out of variational vector x\n    return cons['beta_top']-sp.absolute(beta)\n\ndef constraintCavityT2_Up (x):\n    \"\"\" constraint for the cavity mode ... |A(T2)| should match |A(T2+TS)| \"\"\"\n    global dim,cons,conf\n\n    alpha = sp.ones([dim['alpha']+1],complex)\n    alpha[:dim['alpha']] = x[:2*dim['alpha']:2]+1j*x[1:2*dim['alpha']:2] # extract vector alpha out of variational vector x\n\n    my_Up   = sp.dot(alpha.conj().T,cons['vecT2_Up'])\n\n    if ( conf['cavityMatch'] == 0) :\n#        return 1.0 - sp.absolute(my_Up)/sp.absolute(cavityT2_Up)\n        return sp.absolute(cons['cavityT2_Up'])**2-sp.absolute(my_Up)**2 #sp.dot(alpha.conj(),sp.dot(mtrxT2_Up,alpha)).real\n    else :\n        return sp.absolute(cons['cavityT2_Up'] - my_Up)\n\ndef constraintCavityT2_Down (x):\n    \"\"\" constraint for the cavity mode ... 
|A(T2)| should match |A(T2+TS)| \"\"\"\n global dim,cons,conf\n alpha = sp.ones([dim['alpha']+1],complex)\n alpha[:dim['alpha']] = x[:2*dim['alpha']:2]+1j*x[1:2*dim['alpha']:2] # extract vector alpha out of variational vector x\n\n my_Down = sp.dot(alpha.conj().T,cons['vecT2_Down'])\n\n if ( conf['cavityMatch'] == 0) :\n# return 1.0 - sp.absolute(my_Down)/sp.absolute(cavityT2_Down)\n return sp.absolute(cons['cavityT2_Down'])**2-sp.absolute(my_Down)**2 #sp.dot(alpha.conj(),sp.dot(mtrxT2_Down,alpha)).real\n else :\n return sp.absolute(cons['cavityT2_Down'] - my_Down)\n\n\ndef constraintFidelity_Up_Perfect (x):\n global dim,cons\n return 1.0-evaluateFidelity(cons['fidelity_Up'],x,dim['alpha'])\n\n\ndef constraintFidelity_Up_tol (x):\n global dim,cons\n return evaluateFidelity(cons['fidelity_Up'],x,dim['alpha'])-cons['fidelity_Tol']\n\n\ndef constraintFidelity_Down_Perfect (x):\n global dim,cons\n return 1.0-evaluateFidelity(cons['fidelity_Down'],x,dim['alpha'])\n\n\ndef constraintFidelity_Down_tol (x):\n global dim,cons\n return evaluateFidelity(cons['fidelity_Down'],x,dim['alpha'])-cons['fidelity_Tol']\n\n\ndef functionalFidelity(x):\n global conf\n myFunc = 0.0\n if ( conf['toMinimize'] == 1) :\n myFunc = constraintFidelity_Up_Perfect(x)\n elif ( conf['toMinimize'] == 0) :\n myFunc = constraintFidelity_Down_Perfect(x)\n else : \n myFunc = constraintFidelity_Up_Perfect(x) + constraintFidelity_Down_Perfect(x)\n\n return myFunc\n\n \ndef evaluateFidelity(fidelity_vector,x,dimAlpha):\n gamma = sp.ones([dimAlpha+1],complex)\n gamma[:dimAlpha] = x[:2*dimAlpha:2]+1j*x[1:2*dimAlpha:2]\n return sp.absolute(sp.dot(gamma.conj().T,fidelity_vector))\n\n\ndef evaluateMemOlap (x):\n global dim,cons\n gamma = sp.ones([dim['alpha']+1],complex)\n gamma[:dim['alpha']] = x[:2*dim['alpha']:2]+1j*x[1:2*dim['alpha']:2]\n return (sp.dot(gamma.conj().T,sp.dot(cons['mtrxMemOlap'],gamma))).real\n\n\ndef chiSquared (x):\n \"\"\" evaluates chiSquared measure sum_k |B_k(T1)-B_k(T2)|^2 of the spin ensemble \"\"\"\n global dim,fun,conf\n \n myBeta = x[-2]+1j*x[-1] # beta\n\n gamma = sp.zeros([dim['total']],complex)\n gamma[:dim['alpha']] = x[:2*dim['alpha']:2]+1j*x[1:2*dim['alpha']:2]\n gamma[-1] = myBeta # beta\n\n# gamma[:dim['alpha']] = myBeta*(x[:2*dim['alpha']:2]+1j*x[1:2*dim['alpha']:2])\n# gamma[-3] = myBeta\n# gamma[-2] = myBeta\n\n if ( conf['toMinimize'] == 1) :\n myFunc = fun['mtrxBeta_Up']\n elif ( conf['toMinimize'] == 0) :\n myFunc = fun['mtrxBeta_Down']\n else : \n myFunc = fun['mtrxBeta_Up'] + fun['mtrxBeta_Down']\n\n# return sp.sqrt((sp.dot(gamma.conj().T,sp.dot(myFunc,gamma))).real)\n return (sp.dot(gamma.conj().T,sp.dot(myFunc,gamma))).real\n\ndef functionalMemOlapChi2 (x):\n return chiSquared(x)+evaluateMemOlap(x)\n \ndef chiSquaredGradient (x):\n \"\"\" evaluates chiSquared measure sum_k |B_k(T1)-B_k(T2)|^2 of the spin ensemble \"\"\"\n global dim,conf,fun\n \n gamma = sp.zeros([dim['total']],complex)\n gamma[:dim['alpha']] = x[:2*dim['alpha']:2]+1j*x[1:2*dim['alpha']:2]\n# gamma[-3] = 0.0+0.0j # TODO ... 
check if 0 or 1\n# gamma[-2] = 0.0+0.0j\n gamma[-1] = x[-2]+1.0j*x[-1] # beta\n\n if ( conf['toMinimize'] == 1) :\n myFunc = fun['mtrxBeta_Up']\n elif ( conf['toMinimize'] == 0) :\n myFunc = fun['mtrxBeta_Down']\n else : \n myFunc = fun['mtrxBeta_Up'] + fun['mtrxBeta_Down']\n \n complxGradient = sp.dot(myFunc,gamma)\n \n functGradient = sp.zeros([dim['alpha']*2+2])\n functGradient[ :2*dim['alpha']:2] = 2.0*complxGradient[:dim['alpha']].real\n functGradient[1:2*dim['alpha']:2] = 2.0*complxGradient[:dim['alpha']].imag\n functGradient[ -2] = 2.0*complxGradient[-1].real\n functGradient[ -1] = 2.0*complxGradient[-1].imag\n\n return functGradient\n\ndef monitor(x):\n global iteration, conf\n if conf['silent'] == 0 :\n print (\" Iteration: {0:6d}\\r\".format(iteration)),\n iteration+=1\n##########################################################################################\n##########################################################################################\n\n\ndef do_single_variation (phi,theta,cntPhi,cntTheta):\n global conf,cons\n if ( conf['toMinimize'] == 1) :\n minContraints= ( {'type' : 'ineq', 'fun' : constraintNormAlpha },\n {'type' : 'ineq', 'fun' : constraintBetaLow },\n {'type' : 'ineq', 'fun' : constraintBetaTop },\n {'type' : 'eq', 'fun' : constraintCavityT2_Up },\n {'type' : 'eq', 'fun' : constraintFidelity_Up_Perfect },\n )\n elif ( conf['toMinimize'] == 0) :\n minContraints= ( {'type' : 'ineq', 'fun' : constraintNormAlpha },\n {'type' : 'ineq', 'fun' : constraintBetaLow },\n {'type' : 'ineq', 'fun' : constraintBetaTop },\n {'type' : 'eq', 'fun' : constraintCavityT2_Down },\n {'type' : 'eq', 'fun' : constraintFidelity_Down_Perfect },\n )\n else : \n minContraints= ( {'type' : 'ineq', 'fun' : constraintNormAlpha },\n {'type' : 'ineq', 'fun' : constraintBetaLow },\n {'type' : 'ineq', 'fun' : constraintBetaTop },\n {'type' : 'eq', 'fun' : constraintCavityT2_Down },\n# {'type' : 'eq', 'fun' : constraintFidelity_Down_Perfect },\n {'type' : 'eq', 'fun' : constraintCavityT2_Up },\n# {'type' : 'eq', 'fun' : constraintFidelity_Up_Perfect },\n {'type' : 'eq', 'fun' : evaluateMemOlap },\n )\n\n varMax = 1\n varStep = 1\n success = False\n# returnFun = sp.zeros([conf['entries']])\n\n while varStep <= varMax and not success :\n alpha0 =sp.zeros([dim['alpha']],complex)\n alpha0[conf['id0']]=sp.cos(phi)*sp.sin(theta)+0j\n alpha0[conf['id1']]=sp.sin(phi)*sp.sin(theta)+0j\n alpha0[conf['id2']]=sp.cos(theta)+0j\n\n if (sp.absolute(alpha0[conf['id0']]) <= 1e-10): \n alpha0[conf['id0']]=0.0+0j\n if (sp.absolute(alpha0[conf['id1']]) <= 1e-10): \n alpha0[conf['id1']]=0.0+0j\n if (sp.absolute(alpha0[conf['id2']]) <= 1e-10): \n alpha0[conf['id2']]=0.0+0j\n\n \n alpha0[:]=alpha0[:]*cons['alpha_norm'] # normalize coefficients for alpha -> defines net-power\n\n print \"### spherical map: phi/pi={0:f}, theta/pi={1:f}\".format(phi/sp.pi,theta/sp.pi)+\" \"+str(cntPhi)+\" \"+str(cntTheta)\n\n returnFun=sp.zeros([conf['entries']])\n returnFun[0]=alpha0[conf['id0']].real\n returnFun[1]=alpha0[conf['id1']].real\n returnFun[2]=alpha0[conf['id2']].real\n\n # gamma0=sp.ones([dim['total']])\n\n beta0 =(cons['beta_low'] + cons['beta_top'])/2.0\n\n x0 =sp.zeros([2*dim['alpha']+2]) # map complex valued vector to real valued vector\n x0[ :2*dim['alpha']:2] =alpha0[:].real # odds: 1,3,5,... <-> real part\n x0[1:2*dim['alpha']:2] =alpha0[:].imag # even: 2,4,6,... <-> imag part\n x0[-2] =beta0.real # corresp. x0[dim['alpha']]\n x0[-1] =beta0.imag # corresp. 
to x0[dim['alpha']+1]\n\n if (conf['silent']==0):\n print (\" shape x_alpha : \" + str(x0[:2*dim['alpha']].shape) + \"; normAlpha**2 - norm(alpha_0)**2:\" + str(constraintNormAlpha(x0)))\n print (\" length of beta: \" + str(sp.absolute(x0[-2]+1j*x0[-1])))\n print (\" minimize chi-squared measure sum_sigma (sum_k |B^sigma_k(T1)-B^sigma_k(T2)|^2) with (method=SLSQP):\")\n\n iteration=1\n res=minimize(chiSquared, #functionalMemOlapChi2, #evaluateMemOlap, #\n x0, \n method='SLSQP',\n jac=chiSquaredGradient,\n constraints=minContraints,\n tol=cons['chi2_Tol'],\n options={'maxiter' : 40000, 'disp' : False},\n# callback=monitor\n )\n\n success=res.success\n alpha_c=res.x[:2*dim['alpha']:2]+1j*res.x[1:2*dim['alpha']:2]\n beta_c =res.x[-2]+1j*res.x[-1]\n\n if (conf['silent']==0):\n print (\"\") \n print (\" current norm (aplha) = \" +str(la.norm(alpha_c)))\n print (\" current length( beta) = \" +str(sp.absolute(beta_c)))\n print (\" current fidelity up = \" +str(evaluateFidelity(cons['fidelity_Up'],res.x,dim['alpha'])))\n print (\" current fidelity down = \" +str(evaluateFidelity(cons['fidelity_Down'],res.x,dim['alpha'])))\n\n# except:\n# print (\"\") \n# print (\" error in minimization ... try next start-vector\")\n\n\n\n if (success) :\n returnFun[-7]=1.0\n else :\n returnFun[7]=0.0\n\n returnFun[-6]=sp.absolute(beta_c)\n returnFun[-5]=la.norm(alpha_c)\n\n returnFun[-4]=evaluateMemOlap(res.x)\n\n returnFun[-3]=evaluateFidelity(cons['fidelity_Up'],res.x,dim['alpha'])\n returnFun[-2]=evaluateFidelity(cons['fidelity_Down'],res.x,dim['alpha'])\n\n if (res.fun >= conf['cutoff']):\n returnFun[-1]=conf['cutoff']\n else:\n returnFun[-1]=res.fun\n\n \n varStep+=1\n # while ... functional variation\n\n if (conf['silent']==0):\n if (success):\n print \"\\n### done with minimization: SUCEEDED\"\n else :\n print \"\\n### done with minimization: NO SUCCESS\"\n\n# print minfun[cntPhi,cntTheta,:]\n return returnFun,cntPhi,cntTheta\n\n\n##########################################################################################\n### main routine #########################################################################\n\n### check for arguments, g: generate data, r: read data, in both ways: generate matrices ###\ndef main_routine (wd=\"./\",cfg=\"./python/parameter.cfg\",generationType=\"p\",\\\n cut1=51.4,cut2=102.8,cut3=154.0,\\\n toMinimize =2,\\\n cavityMatch=1,silent=0,\\\n cutoff=10000,dimGrid=11,id0=0,id1=1,id2=2,pltId=-1,pltLim=1000):\n print \"#################################################################\"\n print \"#################################################################\"\n print \"### optimal control #############################################\"\n print \"### memory pulse evaluation #####################################\"\n print \"#################################################################\"\n print \"#################################################################\"\n\n ### globals for functional variation\n global cons, fun, dim, conf\n\n# global mtrxBeta_Down,mtrxBeta_Up,\\\n# cavityT2_Down,cavityT2_Up,\\\n# mtrxT2_Down,mtrxT2_Up,vecT2_Down,vecT2_Up,fidelity_Up,fidelity_Down,\\\n# normAlpha,beta_low,beta_top,tol_fidelity,\\\n# dimH,dimAlpha,\\\n# iteration,\\\n# conf\n\n cons = dict()\n fun = dict()\n dim = dict()\n conf = dict()\n conf['toMinimize'] =toMinimize\n conf['cavityMatch']=cavityMatch\n conf['silent'] =silent\n\n conf['id0'] =id0\n conf['id1'] =id1\n conf['id2'] =id2\n conf['cutoff'] =cutoff\n conf['entries'] =10\n\n ### globals for functional variation\n\n ### 
generate working environment ###\n    print (\"### working directory: \" + wd)\n    tmpDir = wd+\"tmp/\" \n    cmd  = \"mkdir -p \" + tmpDir\n    call(cmd.split())\n    ### generate working environment ###\n\n    ### read config file ###\n    print (\"### load config file: \" + cfg)\n    configParser = cp.ConfigParser()\n    configParser.read(cfg)\n    print (configParser.sections())\n    cfg=configParser.__dict__['_sections'].copy() \n\n    cons['alpha_norm']=float(cfg['MEConstraints'][\"{storage_amplitude}\"])\n    cons['beta_low']  =float(cfg['MEConstraints'][\"{limit_low_beta}\"])\n    cons['beta_top']  =float(cfg['MEConstraints'][\"{limit_top_beta}\"])\n\n    dim['alpha']=int(cfg['MEFourier']['{storage_harmonic}'])\n    dim['total']=dim['alpha']+3\n\n    prefix        =cfg['FILES']['{prefix}']\n    postfix       =cfg['FILES']['{postfix}']\n    name_optimized=cfg['FILES']['{name_optimized}']\n\n    name_spin     =cfg['FILES']['{name_spin}']\n    name_cavity   =cfg['FILES']['{name_cavity}']\n\n    name_readwrite=IOHelper.getNameReadWrite(**cfg) \n    name_storage  =IOHelper.getNameStorage (**cfg) \n    name_varinit  =IOHelper.getNameInitialVariation (**cfg)\n\n    myTime        =IOHelper.functionaltimes_readwrite(**cfg) # reads time and updates cfg: \n                                                             #   cfg['METime']['{fidelity_ti}'] = myTime['idx_ti']\n                                                             #   cfg['METime']['{fidelity_tf}'] = myTime['idx_tf']\n\n\n    cons['chi2_Tol']     = cfg['MEConstraints']['{tol_chi2}']\n    cons['fidelity_Tol'] = cfg['MEConstraints']['{tol_fidelity}']\n\n    gridPhi   =sp.linspace(0.0, 2.0*sp.pi, num=2*dimGrid-1)\n    gridTheta =sp.linspace(0.0,     sp.pi, num=dimGrid)\n    minfun    =sp.zeros([len(gridPhi),len(gridTheta),conf['entries']])\n\n    if (generationType == \"p\"):\n        print \"## read from file: \" + name_varinit\n        raw = sp.loadtxt(name_varinit)\n        minfun = raw.reshape(len(gridPhi),len(gridTheta),conf['entries'])\n\n    else:\n        ### prepare and compile fortran routines ###\n        print (\"### prepare fortran routines\")\n        replace_in_file('./python/py.parNvCenter.F95'   , tmpDir +'parNvCenter.F95'   , **cfg['NVSETUP'])\n        replace_in_file('./python/py.parMemoryPulse.F95', tmpDir +'parMemoryPulse.F95', **cfg['MEFourier'])\n        replace_in_file(tmpDir +'parMemoryPulse.F95'    , tmpDir +'parMemoryPulse.F95', **cfg['OCFourier'])\n        replace_in_file(tmpDir +'parMemoryPulse.F95'    , tmpDir +'parMemoryPulse.F95', **cfg['MESpin'])\n        replace_in_file(tmpDir +'parMemoryPulse.F95'    , tmpDir +'parMemoryPulse.F95', **cfg['MEConstraints'])\n        replace_in_file(tmpDir +'parMemoryPulse.F95'    , tmpDir +'parMemoryPulse.F95', **cfg['METime'])\n        replace_in_file(tmpDir +'parMemoryPulse.F95'    , tmpDir +'parMemoryPulse.F95', **cfg['OCTime'])\n        replace_in_file(tmpDir +'parMemoryPulse.F95'    , tmpDir +'parMemoryPulse.F95', **cfg['FILES'])\n\n        #write config file\n        with open(prefix+\"parameter.cfg\", 'wb') as configfile:\n            configParser.write(configfile)\n        ### read config file ###\n\n        print (\"### compile fortran routines\")\n        cmd  = \"mv \"+tmpDir+\"parMemoryPulse.F95 \"+wd+\"srcOptCntrl/parMemoryPulse.F95\"\n        call(cmd.split())\n        cmd  = \"mv \"+tmpDir+\"parNvCenter.F95 \"+wd+\"srcNv/parNvCenter.F95\"\n        call(cmd.split())\n\n        cmd = \"./scripts/ifort-memoryHarmonics.sh \" + wd\n        call(cmd.split())\n\n        print (\"### invoke fortran routines\")\n        print (\"### generation Type: \" + generationType)\n        cmd = wd+\"memoryHarmonics\"                                      # location of executable fortran program\n        generateHarmonics = Popen(cmd.split(), stdin=PIPE)              # run fortran program with piped standard input\n        cmd = \"echo \" + generationType                                  # communication with fortran-routine: choose action -> read or generate\n        generateInput     = Popen(cmd.split(), stdout=generateHarmonics.stdin) # send 
action to fortran program\n        output = generateHarmonics.communicate()[0]\n        generateInput.wait()\n        ### prepare and compile fortran routines ###\n\n        ### read data for functional variation ###\n        cons['cavityT2_Down'], \\\n        cons['cavityT2_Up']    = IOHelper.read_CavityMemory (**cfg['FILES'])\n\n        fun ['mtrxBeta_Up'],   \\\n        cons['mtrxT2_Up']  ,   \\\n        cons['vecT2_Up']   ,   \\\n        cons['fidelity_Up'],   \\\n        cons['mtrxMemOlap']    = IOHelper.read_MtrxMemory(\"up\", **cfg['FILES'])  \n\n        fun ['mtrxBeta_Down'], \\\n        cons['mtrxT2_Down']  , \\\n        cons['vecT2_Down']   , \\\n        cons['fidelity_Down'], \\\n        __                     = IOHelper.read_MtrxMemory(\"down\", **cfg['FILES'])  \n\n\n        #overlap_Up    =0.0\n        #overlap_Down  =0.0\n\n\n        ### read data for functional variation ###\n\n        ### functional variation ###\n        print (\"\\n### start minimization: \")\n\n        print (\"\\n### on initial \"+ str(sp.shape(minfun)[0])+\"x\"+str(sp.shape(minfun)[1]) + \"-grid (phi,theta) on sphere\")\n\n        mincnt=0\n        cntPhi  =0\n        t0=0\n        tcnt=len(gridTheta)\n\n        for phi in gridPhi[0:len(gridPhi)-1]:    \n            if(cntPhi == 1):\n                t0=1\n                tcnt=len(gridTheta)-1\n\n            pool = multiprocessing.Pool(processes=multiprocessing.cpu_count())\n\n            results = [ pool.apply_async(do_single_variation, args=(phi,theta,cntPhi,cntTheta))\n                        for theta,cntTheta in zip(gridTheta[t0:tcnt],sp.arange(t0,tcnt,1)) ]\n\n            for p in results:\n                myResult,i,j=p.get()\n                minfun[i,j,:]=myResult\n\n#            minfun[cntPhi,t0:tcnt,:]=[ p.get() for p in results ]\n\n#            cntTheta=0\n#            for theta in gridTheta[t0:tcnt]:\n#                \n#                do_single_variation (phi,theta,minfun[cntPhi,cntTheta,:])\n\n#                mincnt=mincnt+1\n#                cntTheta=cntTheta+1\n#            # end for theta\n            cntPhi=cntPhi+1\n        # end for phi\n\n        minfun[ :, 0,:]=minfun[0, 0,:] # theta=0 : same vector on unit-sphere\n        minfun[ :,-1,:]=minfun[0,-1,:] # theta=pi : same vector on unit-sphere\n        minfun[-1, :,:]=minfun[0, :,:] # periodic\n\n        print \"## write to file: \" + name_varinit\n        print \"## shape     : \" + str(sp.shape(minfun))\n        print \"## lenphi    : \" + str(len(gridPhi))\n        print \"## lentheta: \" + str(len(gridTheta))\n        sp.savetxt(name_varinit,minfun.reshape((len(gridPhi)*len(gridTheta),conf['entries'])),\\\n                   header='#  alpha0['+str(id0)+']; alpha0['+str(id1)+']; alpha0['+str(id2)+'];'\\\n                         +'  success;  norm[alpha]; norm[beta]; '\\\n                         +'  minfun; fidelity_up; fidelity_down')  \n\n    print \"#################################################################\"\n    print \"#################################################################\"\n\n\n    font = {\n        'fontsize'            : 26,\n    }\n\n\n    fig  = plt.figure()\n#    fig0 = fig.add_subplot(121, projection='3d')\n#    fig0.plot_surface(xx, yy, zz0, rstride=10, cstride=5, cmap=cm.Blues, alpha=0.5,zorder=11.0,vmin=-0.25, vmax=1)\n\n    if pltId==-1:\n        mytitle=\"functional value\"\n    elif pltId==-2:\n        mytitle=\"fidelity \\\"$0$\\\"\"\n    elif pltId==-3:\n        mytitle=\"fidelity \\\"$1$\\\"\"\n    elif pltId==-4:\n        mytitle=\"overlap \\\"$0$\\\" and \\\"$1$\\\"\"\n    elif pltId==-5:\n        mytitle=\"norm $\\\\alpha$\"\n    elif pltId==-6:\n        mytitle=\"norm $\\\\beta$\"\n    elif pltId==-7:\n        mytitle=\"variation success\"\n\n\n    fig3D = fig.add_subplot(121, projection='3d')\n\n    x = outer(cos(gridPhi), sin(gridTheta))\n    y = outer(sin(gridPhi), sin(gridTheta))\n    z = outer(ones(size(gridPhi)), cos(gridTheta))\n\n    myDensity=sp.zeros([len(gridPhi),len(gridTheta)])\n    for i in sp.arange(0,len(gridPhi),1):\n        for j in sp.arange(0,len(gridTheta),1):\n            if (minfun[i,j,pltId] >= pltLim):\n                myDensity[i,j] = pltLim\n            else:\n                myDensity[i,j] = minfun[i,j,pltId]\n            \n#    myDensity = plt.cm.afmhot(myDensity/pltLim)\n#    myDensity = 
plt.cm.hot(myDensity/myDensity.max())\n myDensity = plt.cm.terrain(myDensity/pltLim)\n surf = fig3D.plot_surface(x, y, z, rstride=1, cstride=1,facecolors=myDensity)\n# wires = fig3D.plot_wireframe(x, y, z, rstride=5, cstride=5, color=\"black\")\n fig3D.plot(cos(gridPhi), sin(gridPhi), zs=0, zdir='z',lw=0.5, color=\"black\")\n fig3D.plot(cos(gridPhi), sin(gridPhi), zs=0, zdir='x',lw=0.5, color=\"black\")\n\n fig3D.set_xlabel(\"$\\\\alpha_{:}$\".format(id0), **font)\n fig3D.set_ylabel(\"$\\\\alpha_{:}$\".format(id1), **font)\n fig3D.set_zlabel(\"$\\\\alpha_{:}$\".format(id2), **font)\n fig3D.set_title(\"a) 4d-plot of \"+mytitle, **font)\n\n\n fig0 = fig.add_subplot(122)\n fig0_colors=sp.linspace(0,pltLim,40)\n\n\n cp0=fig0.contourf(gridPhi/sp.pi,gridTheta/sp.pi,minfun[:,:,pltId].T,fig0_colors,\\\n cmap=\"terrain\"\\\n# cmap=\"hot\"\\\n# cmap=\"afmhot\"\\\n# cmap=\"gist_stern\"\\\n )\n cb0=plt.colorbar(cp0)\n fig0.set_xlabel(\"$\\phi/\\pi$\", **font)\n fig0.set_ylabel(\"$\\\\theta/\\pi$\", **font)\n fig0.set_title(\"b) map of \"+mytitle, **font)\n fig0.invert_yaxis()\n\n plt.show()\n\n\n### main routine #########################################################################\n##########################################################################################\n\n\nif __name__ == '__main__':\n argh.dispatch_command(main_routine) \n\n" }, { "alpha_fraction": 0.5613779425621033, "alphanum_fraction": 0.5912178754806519, "avg_line_length": 35.114036560058594, "blob_id": "d16f851baf7efcc097a03849dc98b06375466a82", "content_id": "501da78b82751685465935f8b7adb800141f2dd5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4122, "license_type": "no_license", "max_line_length": 101, "num_lines": 114, "path": "/python/MemoryPulsePlot-fs.py", "repo_name": "bhartl/optimal-control", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python2.7\n#-*- coding:utf-8 -*-\nimport argh\nimport os, re, sys\nimport ConfigParser as cp\nimport scipy as sp\nimport matplotlib.pyplot as plt\n\nimport IOHelper\n\n\n##########################################################################################\n### main routine #########################################################################\n\n### check for arguments, g: generate data, r: read data, in both ways: generate matrices ###\ndef main_routine (cfg=\"./python/parameter.cfg\",start=-1,cut1=1000.0,cut2=1000.0,cut3=1000.0,stop=-1):\n\n ### read config file ###\n print (\"load from config file: \" + cfg)\n\n configParser = cp.ConfigParser()\n configParser.read(cfg)\n print (configParser.sections())\n cfg=configParser.__dict__['_sections'].copy() \n\n #for src, target in cfg['NVSETUP'].items():\n # print(src + \" : \" + target)\n omega_c = float(cfg['NVSETUP']['{omega_c}'])\n\n nStore=int(cfg['MEFourier']['{storage_harmonic}'])\n nTimeRead =int(cfg['OCTime']['{read_timecnt}'])\n nTimeStore=int(cfg['METime']['{storage_timecnt}'])\n ### read config file ###\n\n ### read data for functional variation ### \n cavityBaseDown=IOHelper.harmonics_storage(\"down\",**cfg)\n cavityBaseUp =IOHelper.harmonics_storage(\"up\" ,**cfg)\n time =IOHelper.time_storage(**cfg)\n ### read data for functional variation ###\n\n # gamma0=sp.ones([dimH])\n filename=IOHelper.getVectorMemory(**cfg)\n reAlpha,imAlpha=sp.loadtxt(filename).T #sp.random.random_sample([dimH])+0j\n alphaS=reAlpha[0:nStore]-1j*imAlpha[0:nStore]\n beta =reAlpha[-2] -1j*reAlpha[-1]\n\n gammaS =sp.ones([nStore+1],complex) # |alpha_1, ..., alpha_N, 1 >\n 
gammaS[:nStore]=alphaS[:nStore]\n gammaS[-1] =beta\n\n ### plotting\n cavityModeDown=sp.dot(gammaS.conj(),cavityBaseDown)\n cavityModeUp =sp.dot(gammaS.conj(),cavityBaseUp)\n cavityMax = max(max(sp.absolute(cavityModeDown[:])**2),max(sp.absolute(cavityModeUp[:])**2))\n time['store']=time['store']*1e9/omega_c\n time['read']=time['read']*1e9/omega_c\n time['full']=time['full']*1e9/omega_c\n\n\n filename = cfg['FILES']['{prefix}']+cfg['FILES']['{name_readwrite}']+\\\n cfg['FILES']['{name_storage}']+cfg['FILES']['{name_optimized}']\n mytime,A_Re_U,A_Im_U = sp.loadtxt(filename+\"cavityMode_up\"+cfg['FILES']['{postfix}']).T\n mytime,A_Re_D,A_Im_D = sp.loadtxt(filename+\"cavityMode_down\"+cfg['FILES']['{postfix}']).T\n cavityMode_U = sp.absolute(A_Re_U + 1j*A_Im_U)**2\n cavityMode_D = sp.absolute(A_Re_D + 1j*A_Im_D)**2\n\n\n plt.subplot(3,2,1)\n plt.title(\"$\\\\alpha_{S}$\")\n plt.bar(sp.arange(1,nStore+1,1),alphaS.real)\n plt.ylabel(\"real part\")\n\n plt.subplot(3,2,2)\n plt.title(\"$\\\\alpha_{S}$\")\n plt.bar(sp.arange(1,nStore+1,1),alphaS.imag)\n plt.ylabel(\"imag part\")\n\n plt.subplot2grid((3,2),(1,0),colspan=2,rowspan=2)\n# plt.plot(time['full'][:],sp.absolute(cavityModeDown[:]-cavityBaseDown[0,:])**2,label=\"state '0'\")\n# plt.plot(time['full'][:],sp.absolute(cavityModeUp[:]-cavityBaseUp[0,:])**2,label=\"state '1'\")\n\n plt.plot(time['full'],sp.absolute(cavityModeDown)**2,label=\"state '0'\")\n plt.plot(time['full'],sp.absolute(cavityModeUp)**2,label=\"state '1'\")\n\n plt.plot(mytime,cavityMode_D,label=\"state '0' test\")\n plt.plot(mytime,cavityMode_U,label=\"state '1' test\")\n\n plt.legend()\n plt.xlabel('time in ns')\n plt.ylabel('$|A(t)|^2$')\n\n \n if start != -1 and stop != -1:\n plt.xlim([start,stop])\n else:\n plt.xlim([min(time['full']),max(time['full'])])\n\n plt.ylim([0,cavityMax*1.1])\n\n plt.plot([time['ti']*1e9/omega_c,time['ti']*1e9/omega_c],[0,cavityMax*1.1],'k--',linewidth=2.0)\n plt.plot([time['tf']*1e9/omega_c,time['tf']*1e9/omega_c],[0,cavityMax*1.1],'k--',linewidth=2.0)\n plt.plot([cut1,cut1],[0,cavityMax*1.1],linewidth=2.0)\n plt.plot([cut2,cut2],[0,cavityMax*1.1],linewidth=2.0)\n plt.plot([cut3,cut3],[0,cavityMax*1.1],linewidth=2.0)\n plt.show()\n ### plotting\n\n\n### main routine #########################################################################\n##########################################################################################\n\n\nif __name__ == '__main__':\n argh.dispatch_command(main_routine) \n\n\n\n" }, { "alpha_fraction": 0.710659921169281, "alphanum_fraction": 0.7664974331855774, "avg_line_length": 97, "blob_id": "cd32ae4c0e6afb7feb3488a365f1c67b04029d8e", "content_id": "280d78500871d6cdb514ce56a43c7898c98c2242", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 197, "license_type": "no_license", "max_line_length": 183, "num_lines": 2, "path": "/scripts/ifort-complie.sh", "repo_name": "bhartl/optimal-control", "src_encoding": "UTF-8", "text": "#!/bin/bash\nifort -heap-arrays ./srcRand/MersenneTwister.F90 -Tf ./srcNv/modNvCenter.F95 -Tf ./srcOptCntrl/modSmallestOverlap.F95 -Tf ./srcMain/main.F95 -free /opt/NAG/fll6i25dcl/lib/libnag_nag.a \n" }, { "alpha_fraction": 0.5142138600349426, "alphanum_fraction": 0.5279802083969116, "avg_line_length": 44.29536819458008, "blob_id": "828dcd2a06c16da2fb6e562eb150236b4ad17bcc", "content_id": "a94bca60cc781e3b8247dbf45c45f48119838a95", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 
23463, "license_type": "no_license", "max_line_length": 115, "num_lines": 518, "path": "/python/IOHelper.py", "repo_name": "bhartl/optimal-control", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python2.7\n#-*- coding:utf-8 -*-\nimport os, re, sys\nimport ConfigParser as cp\nimport scipy as sp \nfrom subprocess import Popen, PIPE, call\n\n##########################################################################################\n##########################################################################################\ndef replace_in_file(infile, outfile, **replacements):\n \"\"\"Replace some lines in an input file and write to output file. The\n replacements are supplied via a dictionary.\"\"\"\n\n with open(infile) as src_xml:\n src_xml = src_xml.read()\n\n for src, target in replacements.items():\n src_xml = src_xml.replace(src, target)\n #print(src + \" : \" + target)\n\n out_xml = os.path.abspath(outfile)\n with open(out_xml, \"w\") as out_xml:\n out_xml.write(src_xml)\n\n##########################################################################################\n##########################################################################################\ndef getNameReadWrite (**cfg):\n read_harmonic = int(cfg['OCFourier']['{read_harmonic}'])\n write_harmonic = int(cfg['OCFourier']['{write_harmonic}'])\n\n name_readwrite = \"{0:0>4}r_{1:0>4}w_\".format(read_harmonic,write_harmonic)\n cfg['FILES']['{name_readwrite}']=name_readwrite\n\n return name_readwrite\n\n##########################################################################################\n##########################################################################################\ndef getNameStorage (**cfg): \n name_storage = \"{0:0>4}s_\".format(cfg['MEFourier']['{storage_harmonic}'])\n cfg['FILES']['{name_storage}'] = name_storage\n return name_storage\n\n##########################################################################################\n##########################################################################################\ndef getNameInitialVariation (**cfg):\n prefix = cfg['FILES']['{prefix}']\n postfix= cfg['FILES']['{postfix}']\n name_readwrite = getNameReadWrite(**cfg)\n name_storage = getNameStorage (**cfg)\n filename = name_readwrite+name_storage+cfg['FILES']['{name_optimized}']\n filename = filename+\"{0:0>4}x{1:0>4}\".format(cfg['dim_grid']*2-1,cfg['dim_grid'])\n filename = filename+\"var\"\n return prefix+filename+postfix\n\n##########################################################################################\n##########################################################################################\ndef getNameInitialFittest (**cfg):\n prefix = cfg['FILES']['{prefix}']\n postfix= cfg['FILES']['{postfix}']\n name_readwrite = getNameReadWrite(**cfg)\n name_storage = getNameStorage (**cfg)\n filename = name_readwrite+name_storage+cfg['FILES']['{name_optimized}']\n filename = filename+\"{0:0>4}x{1:0>4}\".format(cfg['dim_grid']*2-1,cfg['dim_grid'])\n filename = filename+\"var_fittest\"\n return prefix+filename+postfix\n\n##########################################################################################\n##########################################################################################\ndef getVectorOverlap (**cfg):\n prefix = cfg['FILES']['{prefix}']\n postfix= cfg['FILES']['{postfix}']\n name_readwrite = getNameReadWrite(**cfg)\n filename = name_readwrite+cfg['FILES']['{name_optimized}']+\"vector\"\n return 
prefix+filename+postfix\n\n##########################################################################################\n##########################################################################################\ndef getVectorMemory (**cfg):\n prefix = cfg['FILES']['{prefix}']\n postfix= cfg['FILES']['{postfix}']\n name_readwrite = getNameReadWrite(**cfg)\n name_storage = getNameStorage (**cfg)\n filename = name_readwrite+name_storage+cfg['FILES']['{name_optimized}']+\"vector\"\n return prefix+filename+postfix\n\n##########################################################################################\n##########################################################################################\ndef getFitnessMemory (**cfg):\n prefix = cfg['FILES']['{prefix}']\n postfix= cfg['FILES']['{postfix}']\n name_readwrite = getNameReadWrite(**cfg)\n name_storage = getNameStorage (**cfg)\n filename = name_readwrite+name_storage+cfg['FILES']['{name_optimized}']+\"fitness\"\n return prefix+filename+postfix\n\n##########################################################################################\n##########################################################################################\ndef nv_density (**cfg):\n prefix =cfg['FILES']['{prefix}']\n postfix =cfg['FILES']['{postfix}']\n omega, rho =sp.loadtxt(prefix + \"rho_vs_omega\"+ postfix).T\n return omega,rho\n\n##########################################################################################\n##########################################################################################\ndef harmonics_readwrite (**cfg):\n nWrite = int(cfg['OCFourier']['{write_harmonic}'])\n nRead = int(cfg['OCFourier']['{read_harmonic}'])\n nTime = int(cfg['OCTime']['{read_timecnt}'])\n wTime = int(cfg['OCTime']['{write_timecnt}'])\n\n prefix =cfg['FILES']['{prefix}']\n postfix =cfg['FILES']['{postfix}']\n\n print (\" read data : cavity modes\")\n cavityWrite=sp.zeros([nWrite,wTime],complex)\n cavityMemo =sp.zeros([nWrite,nTime],complex)\n cavityRead =sp.zeros([nRead,nTime],complex)\n\n # load memory - part of reg2\n for iMemo in range(nWrite):\n filename=prefix+\"harmonic\"+\"{0:0>4}\".format(iMemo+1)+\"_cavityMode_reg1_write\"+postfix\n __,real,imag = sp.loadtxt(filename).T\n # time,real,imag=sp.loadtxt(filename,unpack=True)\n cavityWrite[iMemo,:] = real[:]+1j*imag[:]\n\n filename=prefix+\"harmonic\"+\"{0:0>4}\".format(iMemo+1)+\"_cavityMode_reg2_memory\"+postfix\n __,real,imag = sp.loadtxt(filename).T\n # time,real,imag=sp.loadtxt(filename,unpack=True)\n cavityMemo[iMemo,:] = real[:]+1j*imag[:]\n\n # load memory - part of reg2\n for iRead in range(nRead):\n filename=prefix+\"harmonic\"+\"{0:0>4}\".format(iRead+1)+\"_cavityMode_reg2_read\"+postfix\n __,real,imag = sp.loadtxt(filename).T\n # time,real,imag=sp.loadtxt(filename,unpack=True)\n cavityRead[iRead,:] = real[:]+1j*imag[:]\n\n return cavityWrite,cavityMemo,cavityRead\n\n##########################################################################################\n##########################################################################################\ndef harmonics_readwrite_pulse (**cfg):\n nWrite = int(cfg['OCFourier']['{write_harmonic}'])\n nRead = int(cfg['OCFourier']['{read_harmonic}'])\n nTime = int(cfg['OCTime']['{read_timecnt}'])\n wTime = int(cfg['OCTime']['{write_timecnt}'])\n\n prefix =cfg['FILES']['{prefix}']\n readwrite =cfg['FILES']['{name_readwrite}']\n readwrite =getNameReadWrite(**cfg)\n optimized =cfg['FILES']['{name_optimized}']\n namedown =cfg['FILES']['{name_down}']\n nameup 
=cfg['FILES']['{name_up}']\n nameread =cfg['FILES']['{name_read}']\n postfix =cfg['FILES']['{postfix}']\n \n __,downR, downI = sp.loadtxt(prefix+readwrite+optimized+namedown+postfix).T\n __, upR, upI = sp.loadtxt(prefix+readwrite+optimized+nameup +postfix).T\n __,readR, readI = sp.loadtxt(prefix+readwrite+optimized+nameread +postfix).T\n\n return downR+1j*downI,upR+1j*upI,readR+1j*readI\n\n##########################################################################################\n##########################################################################################\ndef functionaltimes_readwrite (**cfg):\n\n nWrite = int(cfg['OCFourier']['{write_harmonic}'])\n nTime = int(cfg['OCTime']['{read_timecnt}'])\n\n prefix =cfg['FILES']['{prefix}']\n postfix =cfg['FILES']['{postfix}']\n name_readwrite=getNameReadWrite(**cfg)\n name_optimized=cfg['FILES']['{name_optimized}']\n\n print (\" read data : functional time\")\n timeRead =sp.zeros([nTime],float)\n\n # load time for reading section memory - part of reg2\n filename=prefix+\"harmonic{0:0>4}_cavityMode_reg2_memory\".format(nWrite)+postfix\n timeRead ,__,__ = sp.loadtxt(filename).T\n\n filename=prefix+\"harmonic{0:0>4}_cavityMode_reg1_write\".format(nWrite)+postfix\n timeWrite,__,__ = sp.loadtxt(filename).T\n\n # read functional times t2, t3\n configParser2 = cp.ConfigParser()\n configParser2.read(prefix+name_readwrite+name_optimized+\"FunctionalTimes\"+postfix)\n\n time=configParser2.__dict__['_sections']['functime'] # in seconds*wc\n time['read'] =timeRead # in seconds*wc\n time['write']=timeWrite # in seconds*wc\n # read functional times t2, t3\n\n cfg['METime']['{fidelity_ti}'] = time['idx_ti']\n cfg['METime']['{fidelity_tf}'] = time['idx_tf']\n\n return time\n\n##########################################################################################\n##########################################################################################\ndef read_MtrxOverlap (**cfgFiles):\n# print (\" read data : HReal, HImag ... complex overlap functional\")\n# h_imag = sp.loadtxt(prefix+filename+\"HImag\"+postfix)\n# h_real = sp.loadtxt(prefix+filename+\"HReal\"+postfix)\n prefix =cfgFiles['{prefix}'] + cfgFiles['{name_readwrite}']\n postfix =cfgFiles['{postfix}']\n\n print (\" read data : H0 ... basic separated functional\")\n h0_imag = sp.loadtxt(prefix+\"H0Imag\"+postfix)\n h0_real = sp.loadtxt(prefix+\"H0Real\"+postfix)\n\n print (\" read data : HSDown ... storage matrix\")\n hs_down_imag = sp.loadtxt(prefix+\"HSDownImag\"+postfix)\n hs_down_real = sp.loadtxt(prefix+\"HSDownReal\"+postfix)\n\n print (\" read data : HSUp ... storage matrix\")\n hs_up_imag = sp.loadtxt(prefix+\"HSUpImag\"+postfix)\n hs_up_real = sp.loadtxt(prefix+\"HSUpReal\"+postfix)\n\n print (\" read data : HSep ... separation matrix\")\n hsep_imag = sp.loadtxt(prefix+\"HSepImag\"+postfix)\n hsep_real = sp.loadtxt(prefix+\"HSepReal\"+postfix)\n\n print (\" read data : HCDown ... constraint matrix\")\n hc_down_imag = sp.loadtxt(prefix+\"HCDownImag\"+postfix)\n hc_down_real = sp.loadtxt(prefix+\"HCDownReal\"+postfix)\n\n print (\" read data : HCUp ... 
constraint matrix\")\n hc_up_imag = sp.loadtxt(prefix+\"HCUpImag\"+postfix)\n hc_up_real = sp.loadtxt(prefix+\"HCUpReal\"+postfix)\n\n myH0 = h0_real + 1j*h0_imag\n myHSDown = hs_down_real + 1j*hs_down_imag\n myHSUp = hs_up_real + 1j*hs_up_imag\n myHSep = hsep_real + 1j*hsep_imag\n myHCDown = hc_down_real + 1j*hc_down_imag\n myHCUp = hc_up_real + 1j*hc_up_imag\n\n# return h_real, h_imag, h0_real + 1j*h0_imag, hc_down_real + 1j*hc_down_imag, hc_up_real + 1j*hc_up_imag\n return myH0, myHSDown, myHSUp, myHCDown, myHCUp, myHSep\n##########################################################################################\n##########################################################################################\n\n##########################################################################################\n##########################################################################################\ndef read_HoleData(storage=\"\",**cfgFiles):\n prefix =cfgFiles['{prefix}'] + cfgFiles['{name_readwrite}']+storage\n postfix =cfgFiles['{postfix}']\n effectiveHole =dict()\n\n print (\"### read data : Hole value - slope - curvature\")\n value_imag = sp.loadtxt(prefix+\"HoleValueI\"+postfix)\n value_real = sp.loadtxt(prefix+\"HoleValueR\"+postfix)\n\n slope_imag = sp.loadtxt(prefix+\"HoleSlopeI\"+postfix)\n slope_real = sp.loadtxt(prefix+\"HoleSlopeR\"+postfix)\n\n curv_imag = sp.loadtxt(prefix+\"HoleCurvI\"+postfix)\n curv_real = sp.loadtxt(prefix+\"HoleCurvR\"+postfix)\n\n effectiveHole['value'] = value_real + 1j*value_imag\n effectiveHole['slope'] = slope_real + 1j*slope_imag\n effectiveHole['curv'] = curv_real + 1j*curv_imag\n \n return effectiveHole\n##########################################################################################\n##########################################################################################\n\n\n##########################################################################################\n##########################################################################################\ndef harmonics_storage (stateKey,**cfg):\n nStore = int(cfg['MEFourier']['{storage_harmonic}'])\n nReadTime = int(cfg['OCTime']['{read_timecnt}'])\n nStoreTime = int(cfg['METime']['{storage_timecnt}'])\n\n name_readwrite = getNameReadWrite(**cfg)\n prefix = cfg['FILES']['{prefix}']\n postfix =cfg['FILES']['{postfix}']\n\n print (\" read data : cavity modes \" +stateKey)\n cavityReg2=sp.zeros([nStore+2,nStoreTime],complex)\n cavityReg3=sp.zeros([nStore+2,nReadTime ],complex)\n\n # load memory - part of reg2\n for iStore in range(nStore):\n filename = name_readwrite+\"harmonic{0:0>4}_cavityMode_\".format(iStore+1)\n\n __,real,imag = sp.loadtxt(prefix+filename+\"reg2_store\"+postfix).T\n cavityReg2[iStore,:] = real[:]+1j*imag[:]\n\n __,real,imag = sp.loadtxt(prefix+filename+\"reg3_read_stored\"+postfix).T\n cavityReg3[iStore,:] = real[:]+1j*imag[:]\n\n ### reading <down/up> cavity-amplitudes ###################################################\n filename = name_readwrite+\"harmonic{0:0>4}_cavityMode_\".format(0)\n\n __,real,imag = sp.loadtxt(prefix+filename+\"reg3_read_drive_\"+stateKey+postfix).T\n cavityReg3[-2,:] = real[:]+1j*imag[:]\n\n __,real,imag = sp.loadtxt(prefix+filename+\"reg2_store_\"+stateKey+postfix).T\n cavityReg2[-1,:] = real[:]+1j*imag[:]\n\n __,real,imag = sp.loadtxt(prefix+filename+\"reg3_read_stored_\"+stateKey+postfix).T\n cavityReg3[-1,:] = real[:]+1j*imag[:]\n\n return 
cavityReg2,cavityReg3\n\n##########################################################################################\n##########################################################################################\ndef time_storage (**cfg):\n nStore = int(cfg['MEFourier']['{storage_harmonic}'])\n nWriteTime = int(cfg['OCTime']['{write_timecnt}'])\n nReadTime = int(cfg['OCTime']['{read_timecnt}'])\n nStoreTime = int(cfg['METime']['{storage_timecnt}'])\n omega_c = float(cfg['NVSETUP']['{omega_c}'])\n\n name_readwrite = getNameReadWrite(**cfg)\n prefix = cfg['FILES']['{prefix}']\n filename = name_readwrite+\"harmonic{0:0>4}_cavityMode_\".format(0)\n postfix =cfg['FILES']['{postfix}']\n\n print (\" read data : storage and read time\")\n\n ### reading <down> cavity-amplitudes ###################################################\n timeStore,__,__ = sp.loadtxt(prefix+filename+\"reg2_store_down\"+postfix).T\n timeRead ,__,__ = sp.loadtxt(prefix+filename+\"reg3_read_stored_down\"+postfix).T\n\n time=functionaltimes_readwrite (**cfg)\n time['store']=timeStore\n time['read'] =timeRead \n\n\n time['ti'] =timeRead[int(time['idx_ti'])-1]\n time['tf'] =timeRead[int(time['idx_tf'])-1]\n\n return time\n##########################################################################################\n##########################################################################################\ndef read_MtrxMemory (stateKey,**cfgFiles):\n\n prefix =cfgFiles['{prefix}']+cfgFiles['{name_readwrite}']+cfgFiles['{name_storage}']\n postfix =cfgFiles['{postfix}']\n\n print (\"### read data : mtrxBeta_\"+stateKey)\n B_imag = sp.loadtxt(prefix+\"mtrxBetaImag_\"+stateKey+postfix)\n B_real = sp.loadtxt(prefix+\"mtrxBetaReal_\"+stateKey+postfix)\n\n mtrxBeta = B_real +1j*B_imag\n\n print (\"### read data : vecT2_\"+stateKey)\n vT2_real,vT2_imag = sp.loadtxt(prefix+\"vecT2_\"+stateKey+postfix).T\n\n vecT2 = vT2_real +1j*vT2_imag\n\n\n print (\"### read data : fidelity_\"+stateKey)\n fidelity_real,fidelity_imag = sp.loadtxt(prefix+\"vecFidelity_\"+stateKey+postfix).T\n fidelity = fidelity_real + 1j*fidelity_imag\n\n print (\"### read data : mtrxMemOlap\")\n O_real = sp.loadtxt(prefix+\"mtrxMemOlapReal\"+postfix)\n O_imag = sp.loadtxt(prefix+\"mtrxMemOlapImag\"+postfix)\n\n mtrxOverlap = O_real +1j*O_imag\n\n \n return mtrxBeta, vecT2, fidelity, mtrxOverlap\n\n\ndef read_CavityMemory (**cfgFiles):\n filename = cfgFiles['{prefix}']+cfgFiles['{name_readwrite}']+ \\\n cfgFiles['{name_optimized}']+cfgFiles['{name_cavity}']\n\n print (\"### read initial value for cavity up\")\n cavity = sp.loadtxt(filename+\"up\"+cfgFiles['{postfix}'] )\n cavity_up = cavity[0] + 1j*cavity[1]\n\n print (\"### read initial value for cavity down\")\n cavity = sp.loadtxt(filename+\"down\"+cfgFiles['{postfix}'] )\n cavity_down = cavity[0] + 1j*cavity[1]\n\n return cavity_down, cavity_up\n##########################################################################################\n##########################################################################################\n\n\n##########################################################################################\n##########################################################################################\ndef loadCfg (wd=\"./\",cfg=\"./python/parameter.cfg\"):\n\n ### generate working environment ###\n print (\"### working directory: \" + wd)\n tmpDir = wd+\"tmp/\" \n cmd = \"mkdir -p \" + tmpDir\n call(cmd.split())\n ### generate working environment ###\n\n ### read config file ###\n print (\"### load config file: \" 
+ cfg)\n configParser = cp.ConfigParser()\n configParser.read(cfg)\n print (configParser.sections())\n cfg=configParser.__dict__['_sections'].copy() \n name_readwrite=getNameReadWrite(**cfg) \n name_storage =getNameStorage (**cfg) \n name_vector =getVectorMemory (**cfg)\n myTime =functionaltimes_readwrite(**cfg) # reads time and updates cfg: \n # cfg['METime']['{fidelity_ti}'] = myTime['idx_ti']\n # cfg['METime']['{fidelity_tf}'] = myTime['idx_tf']\n\n ### prepare and compile fortran routines ###\n print (\"### prepare fortran routines\")\n replace_in_file('./python/py.parNvCenter.F95' , tmpDir +'parNvCenter.F95' , **cfg['NVSETUP'])\n replace_in_file('./python/py.parMemoryPulse.F95', tmpDir +'parMemoryPulse.F95', **cfg['MEFourier'])\n replace_in_file(tmpDir +'parMemoryPulse.F95' , tmpDir +'parMemoryPulse.F95', **cfg['OCFourier'])\n replace_in_file(tmpDir +'parMemoryPulse.F95' , tmpDir +'parMemoryPulse.F95', **cfg['MESpin'])\n replace_in_file(tmpDir +'parMemoryPulse.F95' , tmpDir +'parMemoryPulse.F95', **cfg['MEConstraints'])\n replace_in_file(tmpDir +'parMemoryPulse.F95' , tmpDir +'parMemoryPulse.F95', **cfg['OCConstraints'])\n replace_in_file(tmpDir +'parMemoryPulse.F95' , tmpDir +'parMemoryPulse.F95', **cfg['METime'])\n replace_in_file(tmpDir +'parMemoryPulse.F95' , tmpDir +'parMemoryPulse.F95', **cfg['OCTime'])\n replace_in_file(tmpDir +'parMemoryPulse.F95' , tmpDir +'parMemoryPulse.F95', **cfg['FILES'])\n replace_in_file('./python/py.par_NVCheck_2DSpins.F95', tmpDir +'par_NVCheck_2DSpins.F95', **cfg['MESpin'])\n replace_in_file(tmpDir +'par_NVCheck_2DSpins.F95', tmpDir +'par_NVCheck_2DSpins.F95', **cfg['FILES'])\n replace_in_file(tmpDir +'par_NVCheck_2DSpins.F95', tmpDir +'par_NVCheck_2DSpins.F95', **cfg['NVSETUP'])\n replace_in_file(tmpDir +'par_NVCheck_2DSpins.F95', tmpDir +'par_NVCheck_2DSpins.F95', **cfg['OCFourier'])\n replace_in_file(tmpDir +'par_NVCheck_2DSpins.F95', tmpDir +'par_NVCheck_2DSpins.F95', **cfg['MEFourier'])\n\n #write config file\n with open(cfg['FILES']['{prefix}']+\"parameter.cfg\", 'wb') as configfile:\n configParser.write(configfile)\n ### read config file ###\n\n cmd = \"mv \"+tmpDir+\"parMemoryPulse.F95 \"+wd+\"srcOptCntrl/parMemoryPulse.F95\"\n call(cmd.split())\n cmd = \"mv \"+tmpDir+\"parNvCenter.F95 \"+wd+\"srcNv/parNvCenter.F95\"\n call(cmd.split())\n\n return cfg\n##########################################################################################\n##########################################################################################\n\ndef loadSpin (wd=\"./\",cfg=\"./python/parameter.cfg\"):\n\n ### generate working environment ###\n print (\"### working directory: \" + wd)\n tmpDir = wd+\"tmp/\" \n cmd = \"mkdir -p \" + tmpDir\n call(cmd.split())\n ### generate working environment ###\n\n ### read config file ###\n print (\"### load config file: \" + cfg)\n configParser = cp.ConfigParser()\n configParser.read(cfg)\n print (configParser.sections())\n cfg=configParser.__dict__['_sections'].copy() \n name_readwrite=getNameReadWrite(**cfg) \n name_storage =getNameStorage (**cfg) \n name_vector =getVectorMemory (**cfg)\n\n ### prepare and compile fortran routines ###\n print (\"### prepare fortran routines\")\n replace_in_file('./python/py.parNvCenter.F95' , tmpDir +'parNvCenter.F95' , **cfg['NVSETUP'])\n replace_in_file('./python/py.parMemoryPulse.F95', tmpDir +'parMemoryPulse.F95', **cfg['MEFourier'])\n replace_in_file(tmpDir +'parMemoryPulse.F95' , tmpDir +'parMemoryPulse.F95', **cfg['OCFourier'])\n replace_in_file(tmpDir +'parMemoryPulse.F95' 
, tmpDir +'parMemoryPulse.F95', **cfg['MESpin'])\n replace_in_file(tmpDir +'parMemoryPulse.F95' , tmpDir +'parMemoryPulse.F95', **cfg['MEConstraints'])\n replace_in_file(tmpDir +'parMemoryPulse.F95' , tmpDir +'parMemoryPulse.F95', **cfg['OCConstraints'])\n replace_in_file(tmpDir +'parMemoryPulse.F95' , tmpDir +'parMemoryPulse.F95', **cfg['METime'])\n replace_in_file(tmpDir +'parMemoryPulse.F95' , tmpDir +'parMemoryPulse.F95', **cfg['OCTime'])\n replace_in_file(tmpDir +'parMemoryPulse.F95' , tmpDir +'parMemoryPulse.F95', **cfg['FILES'])\n replace_in_file('./python/py.par_NVCheck_2DSpins.F95', tmpDir +'par_NVCheck_2DSpins.F95', **cfg['MESpin'])\n replace_in_file(tmpDir +'par_NVCheck_2DSpins.F95', tmpDir +'par_NVCheck_2DSpins.F95', **cfg['FILES'])\n replace_in_file(tmpDir +'par_NVCheck_2DSpins.F95', tmpDir +'par_NVCheck_2DSpins.F95', **cfg['NVSETUP'])\n replace_in_file(tmpDir +'par_NVCheck_2DSpins.F95', tmpDir +'par_NVCheck_2DSpins.F95', **cfg['OCFourier'])\n replace_in_file(tmpDir +'par_NVCheck_2DSpins.F95', tmpDir +'par_NVCheck_2DSpins.F95', **cfg['OCConstraints'])\n replace_in_file(tmpDir +'par_NVCheck_2DSpins.F95', tmpDir +'par_NVCheck_2DSpins.F95', **cfg['MEFourier'])\n\n #write config file\n with open(cfg['FILES']['{prefix}']+\"parameter.cfg\", 'wb') as configfile:\n configParser.write(configfile)\n ### read config file ###\n\n cmd = \"mv \"+tmpDir+\"parMemoryPulse.F95 \"+wd+\"srcOptCntrl/parMemoryPulse.F95\"\n call(cmd.split())\n cmd = \"mv \"+tmpDir+\"parNvCenter.F95 \"+wd+\"srcNv/parNvCenter.F95\"\n call(cmd.split())\n cmd = \"mv \"+tmpDir+\"par_NVCheck_2DSpins.F95 \"+wd+\"srcMain/par_NVCheck_2DSpins.F95\"\n call(cmd.split())\n\n return cfg\n##########################################################################################\n##########################################################################################\n\n\n##########################################################################################\n##########################################################################################\ndef read_MtrxProjection (thetaCnt,phiCnt,**cfgFiles):\n prefix =cfgFiles['{prefix}'] + cfgFiles['{name_readwrite}']\n optimized=cfgFiles['{name_optimized}']\n postfix =cfgFiles['{postfix}']\n\n filename = prefix+optimized+\"MtrxProjection\"+postfix\n \n print (\"### read projection-data \" + filename)\n fullMtrx = sp.loadtxt(filename)\n\n p0 = fullMtrx[:,0]+1j*fullMtrx[:,1]\n p1 = fullMtrx[:,2]+1j*fullMtrx[:,3]\n\n p0 = p0.reshape(thetaCnt,phiCnt)\n p1 = p1.reshape(thetaCnt,phiCnt)\n\n return p0, p1\n##########################################################################################\n##########################################################################################\n" }, { "alpha_fraction": 0.7002456784248352, "alphanum_fraction": 0.7542997598648071, "avg_line_length": 56.85714340209961, "blob_id": "7b43c0fdf8dbae0e58427042b3d60f537e568208", "content_id": "480134eb84c203aa6fbb7835a21ebf75350d40af", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 407, "license_type": "no_license", "max_line_length": 167, "num_lines": 7, "path": "/scripts/ifort-generateHarmonics.sh", "repo_name": "bhartl/optimal-control", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\nrm -f \"$1\"generateHarmonics\nrm -f \"$1\"generateOptimized\n\nifort -heap-arrays -Tf \"$1\"srcNv/modNvCenter.F95 -Tf \"$1\"srcOptCntrl/modSmallestOverlap.F95 -Tf \"$1\"srcMain/main_generateHarmonics.F95 -free -o \"$1\"generateHarmonics \nifort -heap-arrays -Tf 
\"$1\"srcNv/modNvCenter.F95 -Tf \"$1\"srcOptCntrl/modSmallestOverlap.F95 -Tf \"$1\"srcMain/main_generateOptimized.F95 -free -o \"$1\"generateOptimized \n\n" }, { "alpha_fraction": 0.5350671410560608, "alphanum_fraction": 0.5770624876022339, "avg_line_length": 40.30836868286133, "blob_id": "748436065bd46fe2cc1527e8d1da0c8a683d19c1", "content_id": "801c21f4133782396a9eb3e6f1ae440dd1cd7f35", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9384, "license_type": "no_license", "max_line_length": 145, "num_lines": 227, "path": "/python/SmallestOverlapPartialOverlap-Bloch.py~", "repo_name": "bhartl/optimal-control", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python2.7\n#-*- coding:utf-8 -*-\nimport argh\nimport os, re, sys\nimport ConfigParser as cp\nimport scipy as sp\nfrom scipy.integrate import cumtrapz\nfrom math import acos,asin\n\nfrom mpl_toolkits.mplot3d import axes3d\nfrom matplotlib import cm\nfrom matplotlib.ticker import LinearLocator, FormatStrFormatter\nimport matplotlib.pyplot as plt\n\nimport IOHelper\nfrom math import log10, floor\n\n\n##########################################################################################\n### main routine #########################################################################\n\ndef round_sig(x, sig=2):\n return round(x, sig-int(floor(log10(x)))-1)\n\n### check for arguments, g: generate data, r: read data, in both ways: generate matrices ###\ndef main_routine (wd=\"./\",cfg=\"./python/parameter.cfg\",thetaCnt=11,phiCnt=11,bloch=0,myMap=\"jet\"):\n\n ### read config file ###\n print (\"load from config file: \" + cfg)\n\n configParser = cp.ConfigParser()\n configParser.read(cfg)\n print (configParser.sections())\n cfg=configParser.__dict__['_sections'].copy() \n\n #for src, target in cfg['NVSETUP'].items():\n # print(src + \" : \" + target)\n omega_c = float(cfg['NVSETUP']['{omega_c}'])\n\n nWrite=int(cfg['OCFourier']['{write_harmonic}'])\n nRead =int(cfg['OCFourier']['{read_harmonic}'])\n nStore=int(cfg['MEFourier']['{storage_harmonic}'])\n\n nDown =nRead+nWrite\n nUp =nDown+nWrite\n ### read config file ###\n\n ### read data ### \n cavityWrite,cavityMemo,cavityRead =IOHelper.harmonics_readwrite(**cfg)\n time =IOHelper.functionaltimes_readwrite(**cfg)\n\n time['write'][:] *= 1e9\n time['read'][:] *= 1e9\n ti = int(time['idx_ti'])\n tf = int(time['idx_tf'])\n functime = time['read'][ti:tf] \n dt = float(time['delta_t'])\n\n filename =IOHelper.getVectorOverlap(**cfg)\n reGamma,imGamma=sp.loadtxt(filename).T \n alphaR =reGamma[0:nRead] -1j*imGamma[0:nRead]\n alphaD =reGamma[nRead:nDown]-1j*imGamma[nRead:nDown]\n alphaU =reGamma[nDown:nUp] -1j*imGamma[nDown:nUp]\n ### read data ###\n\n ### plotting\n Reg1Up = sp.dot(alphaU.conj(),cavityWrite)\n Reg1Down = sp.dot(alphaD.conj(),cavityWrite)\n\n Reg2Down = sp.dot(alphaD.conj(),cavityMemo)\n Reg2Up = sp.dot(alphaU.conj(),cavityMemo)\n Reg2Read = sp.dot(alphaR.conj(),cavityRead)\n\n Reg2DownRead = Reg2Down + Reg2Read\n Reg2UpRead = Reg2Up + Reg2Read\n\n spos = sp.zeros([2,thetaCnt,phiCnt],complex)\n FuncInfoOlap = sp.zeros([2,thetaCnt,phiCnt],complex)\n\n# I00 = 1j*cumtrapz( (Reg2Down[ti:tf].conj() * Reg2Down[ti:tf]).imag, x=None, dx=dt )[-1]\n I00 = cumtrapz( (Reg2Down[ti:tf] * Reg2Down[ti:tf].conj()).real, x=None, dx=dt )[-1]\n I0R = 1j*cumtrapz( (Reg2Read[ti:tf] * Reg2Down[ti:tf].conj()).imag, x=None, dx=dt )[-1]\n I0R += cumtrapz( (Reg2Read[ti:tf] * Reg2Down[ti:tf].conj()).real, x=None, dx=dt )[-1]\n\n# I11 = 1j*cumtrapz( (Reg2Up 
[ti:tf].conj() * Reg2Up [ti:tf]).imag, x=None, dx=dt )[-1]\n I11 = cumtrapz( (Reg2Up [ti:tf] * Reg2Up [ti:tf].conj()).real, x=None, dx=dt )[-1]\n I1R = 1j*cumtrapz( (Reg2Read[ti:tf] * Reg2Up [ti:tf].conj()).imag, x=None, dx=dt )[-1]\n I1R += cumtrapz( (Reg2Read[ti:tf] * Reg2Up [ti:tf].conj()).real, x=None, dx=dt )[-1]\n\n\n for i in sp.arange(0.0,thetaCnt):\n theta = i/(thetaCnt-1.0) # from 0 to 1 * pi\n \n for j in sp.arange(0.0,phiCnt):\n phi = j/(phiCnt-1.0)*2.0 # from 0 to 2 * pi\n \n spos[0,i,j] = sp.cos(theta*sp.pi) \n spos[1,i,j] = sp.sin(theta*sp.pi)*sp.exp(1j*phi*sp.pi)\n FuncCavity = spos[0,i,j]*Reg2Down[ti:tf] + spos[1,i,j]*Reg2Up[ti:tf]\n FuncCavity[:] += Reg2Read [ti:tf]\n\n FuncInfoOlap [0,i,j] = cumtrapz( (FuncCavity[:] * Reg2Down[ti:tf].conj()).real, x=None, dx=dt )[-1]\n FuncInfoOlap [0,i,j] += 1j*cumtrapz( (FuncCavity[:] * Reg2Down[ti:tf].conj()).imag, x=None, dx=dt )[-1]\n FuncInfoOlap [0,i,j] = (FuncInfoOlap [0,i,j]-I0R)/I00\n\n FuncInfoOlap [1,i,j] = cumtrapz( (FuncCavity[:] * Reg2Up[ti:tf].conj()).real, x=None, dx=dt )[-1]\n FuncInfoOlap [1,i,j] += 1j*cumtrapz( (FuncCavity[:] * Reg2Up[ti:tf].conj()).imag, x=None, dx=dt )[-1]\n FuncInfoOlap [1,i,j] = (FuncInfoOlap [1,i,j]-I1R)/I11\n \n \n\n fig = plt.figure()\n fs = 22\n label_size = 20\n plt.rcParams['xtick.labelsize'] = label_size \n plt.rcParams['ytick.labelsize'] = label_size \n\n if bloch == 0:\n xx, yy = sp.meshgrid(sp.linspace(0.0,2.0,phiCnt),sp.linspace(0.0,1.0,thetaCnt))\n\n zmin = 0.0\n zmax = +1.0\n\n zzOlapR0 = FuncInfoOlap [0,:,:].real\n zzOlapI0 = FuncInfoOlap [0,:,:].imag\n# print zzOlapI0 \n\n zzOlapR1 = FuncInfoOlap [1,:,:].real\n zzOlapI1 = FuncInfoOlap [1,:,:].imag\n\n fig1 = fig.add_subplot(221, projection='3d')\n fig1.plot_surface(xx, yy, zzOlapR0, rstride=5, cstride=5, cmap=myMap, alpha=0.5,zorder=11.0,vmin=zzOlapR0.min(), vmax=zzOlapR0.max())\n fig1.set_zlim(-1,1)\n fig1.set_title(\"a) Re$[(\\\\tilde O($\\\"$0$\\\"$)-\\\\tilde {\\cal I}_{0R})/\\\\tilde {\\cal I}_{00}]\\\\approx\\cos(\\\\theta)$\",fontsize=fs)\n fig1.set_ylabel(\"$\\\\theta/\\pi$\",fontsize=fs)\n fig1.set_xlabel(\"$\\phi / \\pi$\",fontsize=fs)\n\n fig1 = fig.add_subplot(223, projection='3d')\n fig1.plot_surface(xx, yy, zzOlapI0, rstride=5, cstride=5, cmap=myMap, alpha=0.5,zorder=11.0,vmin=zzOlapI0.min(), vmax=zzOlapI0.max())\n fig1.set_zlim(-0.01,0.01)\n fig1.set_title(\"c) Im$[(\\\\tilde O($\\\"$0$\\\"$)-\\\\tilde {\\cal I}_{0R})/\\\\tilde {\\cal I}_{00}]\\\\approx0$\",fontsize=fs)\n fig1.set_ylabel(\"$\\\\theta/\\pi$\",fontsize=fs)\n fig1.set_xlabel(\"$\\phi/\\pi$\",fontsize=fs)\n\n fig1 = fig.add_subplot(222, projection='3d')\n fig1.plot_surface(xx, yy, zzOlapR1, rstride=5, cstride=5, cmap=myMap, alpha=0.5,zorder=11.0,vmin=zzOlapR1.min(), vmax=zzOlapR1.max())\n fig1.set_zlim(-1,1)\n fig1.set_title(\"b) Re$[(\\\\tilde O($\\\"$1$\\\"$)-\\\\tilde {\\cal I}_{1R})/\\\\tilde {\\cal I}_{11}]\\\\approx\\sin(\\\\theta)\\cdot\\cos(\\phi)$\",fontsize=fs)\n fig1.set_ylabel(\"$\\\\theta/\\pi$\",fontsize=fs)\n fig1.set_xlabel(\"$\\phi / \\pi$\",fontsize=fs)\n\n fig1 = fig.add_subplot(224, projection='3d')\n fig1.plot_surface(xx, yy, zzOlapI1, rstride=5, cstride=5, cmap=myMap, alpha=0.5,zorder=11.0,vmin=zzOlapI1.min(), vmax=zzOlapI1.max())\n fig1.set_zlim(-1,1)\n # fig1.set_title(\"a) Re$[(O_{\\mathbb{C}}(\"0\")-\\\\tilde {\\cal I}_{0R})/\\\\tilde {\\cal I}_{00}]$\",fontsize=fs)\n fig1.set_title(\"d) Im$[(\\\\tilde O($\\\"$1$\\\"$)-\\\\tilde {\\cal 
I}_{11}]\\\\approx\\sin(\\\\theta)\\cdot\\sin(\\phi)$\",fontsize=fs)\n fig1.set_ylabel(\"$\\\\theta/\\pi$\",fontsize=fs)\n fig1.set_xlabel(\"$\\phi/\\pi$\",fontsize=fs)\n\n else:\n \n# x_expect = 2e0*sp.real(FuncInfoOlap [0,:,:]*FuncInfoOlap [1,:,:])\n# \n\n x = 2e0*sp.real(spos [0,:,:].conj()*spos [1,:,:])\n y = 2e0*sp.real(spos [0,:,:].conj()*spos [1,:,:]/1.0j)\n z = sp.absolute(spos [0,:,:])**2 -sp.absolute(spos [1,:,:])**2\n\n xprime = 2e0*sp.real(FuncInfoOlap [0,:,:].conj()*FuncInfoOlap [1,:,:])\n yprime = 2e0*sp.real(FuncInfoOlap [0,:,:].conj()*FuncInfoOlap [1,:,:]/1.0j)\n zprime = sp.absolute(FuncInfoOlap [0,:,:])**2 -sp.absolute(FuncInfoOlap [1,:,:])**2\n \n myDensity = sp.sqrt(sp.absolute(x-xprime)**2 + sp.absolute(y-yprime)**2 + sp.absolute(z-zprime)**2)\n# myDensity = 1.0 - (x*xprime + y*yprime + z*zprime) \n \n cm = plt.cm.get_cmap(myMap) \n myMin = myDensity.min()\n myMax = max(sp.absolute(myDensity.max()),sp.absolute(myDensity.min()))\n myColors = cm(myDensity/myMax)\n m = plt.cm.ScalarMappable(cmap=myMap)\n\n fig3D = fig.add_subplot(1, 1, 1, projection='3d')\n\n surf = fig3D.plot_surface(xprime, yprime, zprime, rstride=1, cstride=1, linewidth=1, color=\"black\", \n facecolors=myColors,shade=False,antialiased=True,\n vmin=myMin, vmax=myMax)\n# fig3D.plot_wireframe(x*1.01, y*1.01, z*1.01, rstride=1, cstride=1,alpha=1,linewidth=1,color=\"black\")\n tick2=(myMin+myMax)/2.0\n tick1=round_sig(myMin+(myMax-myMin)*0.1,1)\n tick3=round_sig(myMax-(myMax-myMin)*0.1,2)\n tick2=round_sig(tick2,2)\n print tick1,tick2,tick3\n\n m.set_array(myDensity)\n cb= plt.colorbar(m,shrink=0.5,aspect=7,ticks=([tick1,tick2,tick3])) \n cb.formatter.set_scientific(True) \n cb.formatter.set_powerlimits((0, 0))\n cb.update_ticks()\n \n fig3D.set_xlabel(\"$\\langle \\sigma_x(\\gamma^\\prime,\\delta^\\prime) \\\\rangle$\", fontsize=fs)\n fig3D.set_ylabel(\"$\\langle \\sigma_y(\\gamma^\\prime,\\delta^\\prime) \\\\rangle$\", fontsize=fs)\n fig3D.set_zlabel(\"$\\langle \\sigma_z(\\gamma^\\prime,\\delta^\\prime) \\\\rangle$\", fontsize=fs)\n \n fig3D.set_xticks([-1,0,1])\n fig3D.set_xlim([-1.01,1.01])\n fig3D.xaxis._axinfo['label']['space_factor'] = 2.0\n\n fig3D.set_yticks([-1,0,1])\n fig3D.set_ylim([-1.01,1.01])\n fig3D.yaxis._axinfo['label']['space_factor'] = 2.0\n\n fig3D.set_zticks([-1,0,1])\n fig3D.set_zlim([-1.01,1.01])\n fig3D.zaxis._axinfo['label']['space_factor'] = 2.0\n\n \n plt.show()\n\n ### plotting\n\n\n### main routine #########################################################################\n##########################################################################################\n\n\nif __name__ == '__main__':\n argh.dispatch_command(main_routine) \n\n\n\n" }, { "alpha_fraction": 0.5427260994911194, "alphanum_fraction": 0.5791612267494202, "avg_line_length": 33.65909194946289, "blob_id": "7167d4de3905e314dd7a4975bccd4704718575c9", "content_id": "4c17839d92f7e4fe5688a91a635fdab90f8a9e4b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7630, "license_type": "no_license", "max_line_length": 146, "num_lines": 220, "path": "/python/SpinAnalysis.py~", "repo_name": "bhartl/optimal-control", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python2.7\n#-*- coding:utf-8 -*-\nimport argh\nimport os, re, sys\nimport ConfigParser as cp\nimport scipy as sp\nfrom scipy.integrate import cumtrapz\nfrom math import acos,asin\n\nfrom mpl_toolkits.mplot3d import axes3d\nfrom matplotlib import cm\nfrom matplotlib import font_manager\nfrom matplotlib.ticker 
import LinearLocator, FormatStrFormatter\nimport matplotlib.pyplot as plt\nimport matplotlib.colors as mcolors\n\nfrom IOHelper import replace_in_file\nimport IOHelper\nfrom subprocess import Popen, PIPE, call\n\nfrom math import log10, floor\n\n\n##########################################################################################\n### main routine #########################################################################\ndef make_colormap(seq):\n \"\"\"Return a LinearSegmentedColormap\n seq: a sequence of floats and RGB-tuples. The floats should be increasing\n and in the interval (0,1).\n \"\"\"\n seq = [(None,) * 3, 0.0] + list(seq) + [1.0, (None,) * 3]\n cdict = {'red': [], 'green': [], 'blue': []}\n for i, item in enumerate(seq):\n if isinstance(item, float):\n r1, g1, b1 = seq[i - 1]\n r2, g2, b2 = seq[i + 1]\n cdict['red'].append([item, r1, r2])\n cdict['green'].append([item, g1, g2])\n cdict['blue'].append([item, b1, b2])\n return mcolors.LinearSegmentedColormap('CustomMap', cdict)\n\ndef round_sig(x, sig=2):\n return round(x, sig-int(floor(log10(x)))-1)\n\n### check for arguments, g: generate data, r: read data, in both ways: generate matrices ###\ndef main_routine (wd=\"./\",cfg=\"./python/spinanalysis.cfg\",myMap=\"jet\",generate=1,plottype=0,w1=2400,w2=2450,w3=2500,w4=2550,w5=2600,cut=0,nano=1):\n\n ### read config file ###\n print (\"load from config file: \" + cfg)\n cfg=IOHelper.loadSpin(wd,cfg)\n\n omega_c = float(cfg['NVSETUP']['{omega_c}'])\n ### read config file ###\n\n if (generate==1):\n cmd = \"./scripts/ifort-spinAnalysis.sh \" + wd\n print (\"compile fortran routines: \"+cmd)\n call(cmd.split())\n\n print (\"### call SpinAnalysis\")\n cmd=wd+\"SpinAnalysis\"\n analysis = Popen(cmd.split(), stdin=PIPE) # run fortran program with piped standard input\n output = analysis.communicate()[0]\n# generateInput.wait()\n \n filebase =\"./thesis/dat/decay/sin_drive_00h_A0_0.0_0.0i_\"\n\n filename= filebase+\"cavity.dat\"\n time,__,cavityR,cavityI = sp.loadtxt(filename).T\n cavity = cavityR + 1j*cavityI\n\n filename= filebase+\"spin.dat\"\n times,__, spinR, spinI = sp.loadtxt(filename).T\n spin = spinR + 1j*spinI\n\n tott = int(cfg['MESpin']['{timecnt_analysis}'])\n timeBinSpin = int(cfg['MESpin']['{timebin_spin}'])\n\n filename = filebase + \"spin_%#07.2fns.dat\" % (times[-1])\n omega,__,__ = sp.loadtxt(filename).T\n\n spin2d = sp.zeros([sp.size(times),sp.size(omega)],complex)\n\n omega_c = float(cfg['NVSETUP']['{omega_c}'])\n omega *= omega_c\n\n print \"### load data ...\"\n myMin = 0\n myMax = 0\n for t in sp.linspace(0,sp.size(times)-1,sp.size(times)):\n filename = filebase + \"spin_%#07.2fns.dat\" % (times[t])\n\n __,spinSlideR,spinSlideI = sp.loadtxt(filename).T\n spin2d[t,:]=spinSlideR+1j*spinSlideI \n\n if (min(min(spinSlideR),min(spinSlideI)) < myMin): myMin = min(min(spinSlideR),min(spinSlideI))\n if (max(max(spinSlideR),max(spinSlideI)) > myMax): myMax = max(max(spinSlideR),max(spinSlideI))\n \n# print \"{0:05.2f}\".format(int(t))\n# do t=1,tott,timeBinSpin\n# tmp = nint(mytime(t)*1d2)\n# write(timerange,\"(I0.4,F0.2,A)\") int(tmp/1d2),tmp/1d2-int(tmp/1d2),\"ns\"\n# write(*,\"(1A1,A,I,A,I,$)\") char(13),\"--- spin dynamics at \"//trim(timerange)//\":\", t, \" of \", size(mytime)\n\n myMax = max(sp.absolute(myMin),sp.absolute(myMax))\n myMin = -myMax\n\n fs=20\n label_size = 20\n plt.rcParams['xtick.labelsize'] = label_size \n plt.rcParams['ytick.labelsize'] = label_size \n\n if (nano==1):\n timelabel = \"ns\"\n else :\n time *= 1e-3\n times *= 1e-3\n cut *= 
1e-3\n timelabel = \"$\mu$s\"\n\n if (plottype == 0): \n plt.plot(time,sp.absolute(cavity)**2, color=\"green\",linewidth=2.0, label=\"cavity-amplitude\")\n plt.plot(times,sp.absolute(spin)**2, color=\"red\",linewidth=2.0, label=\"collective spin-wave\")\n plt.plot([cut,cut],[omega[-1],omega[1]],\"k--\")\n plt.ylabel(\"$|A(t)|^2$; $|S_x(t)|^2$\",fontsize=fs)\n plt.xlabel(\"$t$ in \"+timelabel,fontsize=fs)\n plt.legend()\n\n elif (plottype == 1): \n plt.subplot(1,3,1)\n plt.plot(times,sp.real(spin2d[:,w1]))\n plt.plot(times,sp.real(spin2d[:,w2]))\n plt.plot(times,sp.real(spin2d[:,w3]))\n plt.plot(times,sp.real(spin2d[:,w4]))\n plt.plot(times,sp.real(spin2d[:,w5]))\n plt.ylabel(\"$|B_k(t)|^2$\",fontsize=fs)\n plt.title(\"Real part\")\n plt.xlabel(\"$t$ in \"+timelabel,fontsize=fs)\n\n plt.subplot(1,3,2)\n plt.plot(times,sp.imag(spin2d[:,w1]))\n plt.plot(times,sp.imag(spin2d[:,w2]))\n plt.plot(times,sp.imag(spin2d[:,w3]))\n plt.plot(times,sp.imag(spin2d[:,w4]))\n plt.plot(times,sp.imag(spin2d[:,w5]))\n plt.title(\"Imaginary part\")\n plt.xlabel(\"$t$ in \"+timelabel,fontsize=fs)\n\n plt.subplot(1,3,3)\n plt.plot(times,sp.absolute(spin2d[:,w1]+spin2d[:,w2]+spin2d[:,w3]+spin2d[:,w4]+spin2d[:,w5])**2/5.0)\n plt.title(\"Collective spin\")\n plt.xlabel(\"$t$ in \"+timelabel,fontsize=fs)\n\n elif (plottype == 2): \n ### prepare colormap ###################################################################\n fig0_colors=sp.linspace(myMin,myMax,100)\n ### prepare colormap ###################################################################\n\n c = mcolors.ColorConverter().to_rgb\n\n if (myMap == \"custom\"):\n cm = make_colormap([c('cyan'), c('black'), 0.499, c('black'), 0.501, c('black'), c('magenta')])\n else:\n cm = plt.cm.get_cmap(myMap) \n\n otick = [round_sig(0.995*omega_c,4), \n round_sig(0.9975*omega_c,4),\n round_sig(omega_c,4), \n round_sig(1.0025*omega_c,4),\n round_sig(1.005*omega_c,4)]\n\n fig = plt.figure()\n fig0 = fig.add_subplot(121)\n fig0.invert_yaxis()\n fig0.contourf(times,omega,spin2d.real.T,fig0_colors,cmap=cm,vmin=myMin*0.6,vmax=myMax*0.6)\n fig0.plot([cut,cut],[omega[-1],omega[1]],color=\"white\")\n fig0.set_xlabel(\"$t$ in \"+timelabel, fontsize=fs)\n fig0.set_xlim(times[0],times[-1])\n fig0.set_ylabel(\"$\omega$ in GHz\", fontsize=fs)\n fig0.set_ylim([otick[0],otick[-1]])\n fig0.set_yticks(otick)\n fig0.tick_params(direction='out')\n fig0.set_title(\"a) Real part\")\n\n fig1 = fig.add_subplot(122)\n fig1.invert_yaxis()\n cp1=fig1.contourf(times,omega,spin2d.imag.T,fig0_colors,cmap=cm,vmin=myMin*0.6,vmax=myMax*0.6)\n fig1.plot([cut,cut],[omega[-1],omega[1]],color=\"white\")\n fig1.set_xlabel(\"$t$ in \"+timelabel, fontsize=fs)\n fig1.set_xlim(times[0],times[-1])\n fig1.set_ylim([otick[0],otick[-1]])\n fig1.set_yticks(otick)\n fig1.axes.yaxis.set_ticklabels([])\n fig1.tick_params(direction='out')\n fig1.set_title(\"b) Imaginary part\")\n\n\n cbtick = [-round_sig(myMax*0.99,2), \n -round_sig(myMax/2,2),\n 0.0, \n round_sig(myMax/2,2),\n round_sig(myMax*0.99,2)]\n\n cb =plt.colorbar(cp1,label=\"Spin-wave amplitude\",ticks=cbtick)\n ax = cb.ax\n text = ax.yaxis.label\n font = font_manager.FontProperties(size=fs)\n text.set_font_properties(font)\n cb.update_ticks()\n \n \n \n plt.show ()\n\n### main routine #########################################################################\n##########################################################################################\n\n\nif __name__ == '__main__':\n argh.dispatch_command(main_routine) \n\n\n\n" }, { "alpha_fraction": 0.578761875629425, 
"alphanum_fraction": 0.6133925914764404, "avg_line_length": 35.634830474853516, "blob_id": "749cd29bb16092dc61d3ca59c9de7cadc06d1a22", "content_id": "201e58339af2e6fde2da288055927c1386766927", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6526, "license_type": "no_license", "max_line_length": 172, "num_lines": 178, "path": "/python/SmallestOverlapPlot.py", "repo_name": "bhartl/optimal-control", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python2.7\n#-*- coding:utf-8 -*-\nimport argh\nimport os, re, sys\nimport ConfigParser as cp\nimport scipy as sp\nimport matplotlib.pyplot as plt\n\nimport IOHelper\n\n\n##########################################################################################\n### main routine #########################################################################\n\n### check for arguments, g: generate data, r: read data, in both ways: generate matrices ###\ndef main_routine (cfg=\"./python/parameter.cfg\",start=None,stop=None,inset=0,pulse=0):\n\n ### read config file ###\n print (\"load from config file: \" + cfg)\n\n configParser = cp.ConfigParser()\n configParser.read(cfg)\n print (configParser.sections())\n cfg=configParser.__dict__['_sections'].copy() \n\n #for src, target in cfg['NVSETUP'].items():\n # print(src + \" : \" + target)\n\n nRead =int(cfg['OCFourier']['{read_harmonic}'])\n nWrite =int(cfg['OCFourier']['{write_harmonic}'])\n nDown =nRead+nWrite\n nUp =nDown+nWrite\n ### read config file ###\n\n ### read data for functional variation ### \n cavityWrite,cavityMemo,cavityRead =IOHelper.harmonics_readwrite(**cfg)\n pulseDown,pulseUp,pulseRead =IOHelper.harmonics_readwrite_pulse(**cfg)\n time =IOHelper.functionaltimes_readwrite(**cfg)\n omega, rho =IOHelper.nv_density(**cfg)\n time['write'][:] *= 1e6\n time['read'] [:] *= 1e6\n ti = int(time['idx_ti'])\n tf = int(time['idx_tf'])\n functime = time['read'][ti:tf]\n ### read data for functional variation ###\n\n\n filename=IOHelper.getVectorOverlap(**cfg)\n reGamma,imGamma=sp.loadtxt(filename).T \n\n alphaR=reGamma[0:nRead]-1j*imGamma[0:nRead]\n alphaD=reGamma[nRead:nDown]-1j*imGamma[nRead:nDown]\n alphaU=reGamma[nDown:nUp]-1j*imGamma[nDown:nUp]\n\n ### plotting\n cavityWriteDown=sp.dot(alphaD.conj(),cavityWrite)\n cavityWriteUp =sp.dot(alphaU.conj(),cavityWrite)\n\n cavityModeDown=sp.dot(alphaR.conj(),cavityRead)+sp.dot(alphaD.conj(),cavityMemo)\n cavityModeUp =sp.dot(alphaR.conj(),cavityRead)+sp.dot(alphaU.conj(),cavityMemo)\n\n fs=20\n label_size = 20\n plt.rcParams['xtick.labelsize'] = label_size \n plt.rcParams['ytick.labelsize'] = label_size \n\n if (start is None):\n start=time['write'][0]\n if (stop is None):\n stop=functime[-1]\n\n# myXticks=[0,0.1,0.2,0.3,0.4,0.5,0.6,0.7,.8,0.9]\n\n if pulse == 0:\n plt.subplot(4,3,1)\n plt.bar(sp.arange(1,nRead+1,1),alphaR.real,color=\"darkgray\")\n plt.title(\"$\\\\alpha_{R}$\",fontsize=fs)\n plt.ylabel(\"real part\",fontsize=fs)\n plt.subplot(4,3,4)\n plt.bar(sp.arange(1,nRead+1,1),alphaR.imag,color=\"darkgray\")\n plt.ylabel(\"imag part\",fontsize=fs)\n\n plt.subplot(4,3,2)\n plt.bar(sp.arange(1,nWrite+1,1),alphaD.real)\n plt.title(\"$\\\\alpha_{0}$\",fontsize=fs)\n plt.subplot(4,3,5)\n plt.bar(sp.arange(1,nWrite+1,1),alphaD.imag)\n\n plt.subplot(4,3,3)\n plt.bar(sp.arange(1,nWrite+1,1),alphaU.real,color=\"red\")\n plt.title(\"$\\\\alpha_{1}$\",fontsize=fs)\n plt.subplot(4,3,6)\n plt.bar(sp.arange(1,nWrite+1,1),alphaU.imag,color=\"red\")\n else:\n pulseMax = 
max(max(pulseUp.real),max(pulseUp.imag),max(pulseDown.real),max(pulseDown.imag),max(pulseRead.real),max(pulseRead.imag))\n pulseMin = min(min(pulseUp.real),min(pulseUp.imag),min(pulseDown.real),min(pulseDown.imag),min(pulseRead.real),min(pulseRead.imag))\n pulseMM = max(sp.absolute(pulseMax),sp.absolute(pulseMin))\n\n ntWrite = time['write'].size\n ntRead = time['read'].size\n\n p=plt.subplot2grid((4,3),(0,0),colspan=4)\n plt.plot(time['write'],pulseDown.imag, color=\"cyan\",linewidth=2.0)\n plt.plot(time['write'],pulseDown.real, color=\"blue\",linewidth=2.0)\n plt.plot(time['read'],pulseRead.imag, color=\"darkgray\",linewidth=2.0)\n plt.plot(time['read'],pulseRead.real, color=\"black\",linewidth=2.0)\n plt.plot([time['read'][0],time['read'][0]], [-1.1*pulseMM,1.1*pulseMM], 'k--')\n plt.ylabel(\"$\\eta(t)$\",fontsize=fs)\n plt.yticks([-1,0,1])\n plt.ylim([-1.1*pulseMM,1.1*pulseMM])\n# plt.xticks(myXticks)\n p.set_xticklabels([])\n plt.xlim([start,stop])\n\n p=plt.subplot2grid((4,3),(1,0),colspan=4)\n plt.plot(time['write'],pulseUp.imag, color=\"orange\",linewidth=2.0)\n plt.plot(time['write'],pulseUp.real, color=\"red\",linewidth=2.0)\n plt.plot(time['read'],pulseRead.imag, color=\"darkgray\",linewidth=2.0)\n plt.plot(time['read'],pulseRead.real, color=\"black\",linewidth=2.0)\n plt.plot([time['read'][0],time['read'][0]], [-1.1*pulseMM,1.1*pulseMM], 'k--')\n plt.ylabel(\"$\\eta(t)$\",fontsize=fs)\n plt.yticks([-1,0,1])\n plt.ylim([-1.1*pulseMM,1.1*pulseMM])\n# plt.xticks(myXticks)\n p.set_xticklabels([])\n plt.xlim([start,stop])\n\n\n plt.subplot2grid((4,3),(2,0),colspan=4,rowspan=2)\n\n cavityMax = max(max(sp.absolute(cavityWriteDown[:])**2),max(sp.absolute(cavityWriteUp[:])**2),max(sp.absolute(cavityModeDown[:])**2),max(sp.absolute(cavityModeUp[:])**2))\n ymax=1.1*cavityMax # 1.25\n\n plt.plot(time['write'][:],sp.absolute(cavityWriteDown[:])**2, color=\"blue\",linewidth=2.0)\n plt.plot(time['read'][:],sp.absolute(cavityModeDown[:])**2,label=\"state '0'\", color=\"blue\",linewidth=2.0)\n\n plt.plot(time['write'][:],sp.absolute(cavityWriteUp[:])**2, color=\"red\",linewidth=2.0)\n plt.plot(time['read'][:],sp.absolute(cavityModeUp[:])**2 ,label=\"state '1'\", color=\"red\",linewidth=2.0)\n\n plt.fill_between(functime, ymax, 0.0, color='lightgray', facecolor='lightgray', alpha=0.5)\n plt.plot([time['read'][0],time['read'][0]], [0.0,ymax], 'k--')\n\n plt.legend()\n plt.xlabel('$t$ in $\\mu$s',fontsize=fs)\n# plt.xlabel('$t$ in ns',fontsize=fs)\n plt.ylabel('$|A(t)|^2$',fontsize=fs)\n plt.yticks([0,1,2,3])\n plt.ylim([0,ymax])\n# plt.xticks(myXticks)\n\n plt.xlim([start,stop])\n\n if inset != 0:\n # this is an inset axes over the main axes\n a = plt.axes([.4, .25, .23, .2])\n plt.plot(omega,rho,linewidth=2,color=\"black\")\n plt.xlim(0.985,1.015)\n plt.xticks([0.99,1,1.01])\n plt.yticks([0,125,250])\n plt.ylabel('$\\\\rho(\\omega)$', fontsize=fs)\n plt.xlabel('$\\omega/\\omega_s$', fontsize=fs)\n# plt.title('density of states')\n\n\n# plt.plot([cut1,cut1],[0,cavityMax*1.1],linewidth=2.0)\n# plt.plot([cut2,cut2],[0,cavityMax*1.1],linewidth=2.0)\n# plt.plot([cut3,cut3],[0,cavityMax*1.1],linewidth=2.0)\n\n plt.show()\n ### plotting\n\n\n### main routine #########################################################################\n##########################################################################################\n\n\nif __name__ == '__main__':\n argh.dispatch_command(main_routine) \n\n\n\n" }, { "alpha_fraction": 0.5461615324020386, "alphanum_fraction": 0.5662813782691956, "avg_line_length": 
40.82984924316406, "blob_id": "c4b1abb64a49c2d64b74432e5eab69d01f78d129", "content_id": "95a3cc5abfdeadf98da634744cace357d9c2efe9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 14016, "license_type": "no_license", "max_line_length": 156, "num_lines": 335, "path": "/python/MemoryPulseEvaluate.py~", "repo_name": "bhartl/optimal-control", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python2.7\n#-*- coding:utf-8 -*-\nimport argh\nimport os, re, sys\nimport ConfigParser as cp\nimport scipy as sp\nimport scipy.linalg as la\nfrom subprocess import Popen, PIPE, call\nimport matplotlib.pyplot as plt\n\nimport IOHelper\nfrom IOHelper import replace_in_file\nimport MemoryPulseFunctional\nfrom MemoryPulseFunctional import cons, fun, dim, conf\n\n\ndef functionalPlot (alpha,beta,cut1,cut2,cut3,wd,**cfg):\n global dim\n prefix =cfg['FILES']['{prefix}']\n postfix =cfg['FILES']['{postfix}']\n name_optimized=cfg['FILES']['{name_optimized}']\n\n name_spin =cfg['FILES']['{name_spin}']\n name_cavity =cfg['FILES']['{name_cavity}']\n\n name_readwrite=cfg['FILES']['{name_readwrite}']\n name_storage =cfg['FILES']['{name_storage}'] \n\n cmd=wd+\"memoryOptimized\"\n call(cmd.split())\n\n font = {\n 'fontsize' : 20\n }\n #trim(filename)//trim(nameOptimized)//trim(nameSpin)//trim(stateKey)//trim(postfix)\n\n filename = prefix+name_readwrite+name_optimized+name_spin\n omega,I1_Re_U,I1_Im_U,B1_Re_U,B1_Im_U = sp.loadtxt(filename+\"up\"+postfix).T\n omega,I1_Re_D,I1_Im_D,B1_Re_D,B1_Im_D = sp.loadtxt(filename+\"down\"+postfix).T\n\n\n filename = prefix+name_readwrite+name_storage+name_optimized\n\n omega,I2_Re_U,I2_Im_U,B2_Re_U,B2_Im_U = sp.loadtxt(filename+name_spin+\"up\"+postfix).T\n omega,I2_Re_D,I2_Im_D,B2_Re_D,B2_Im_D = sp.loadtxt(filename+name_spin+\"down\"+postfix).T\n\n time,A_Re_U,A_Im_U = sp.loadtxt(filename+\"cavityMode_up\"+postfix).T\n time,A_Re_D,A_Im_D = sp.loadtxt(filename+\"cavityMode_down\"+postfix).T\n cavityMode_U = sp.absolute(A_Re_U + 1j*A_Im_U)**2\n cavityMode_D = sp.absolute(A_Re_D + 1j*A_Im_D)**2\n\n cavityMax = max(max(cavityMode_U),max(cavityMode_D))\n\n B1_U = (B1_Re_U+1j*B1_Im_U)*(sp.conj(beta))#**(n-1) #\n B2_U = (B2_Re_U+1j*B2_Im_U)\n\n B1_D = (B1_Re_D+1j*B1_Im_D)*(sp.conj(beta))#**(n-1) #*beta.conj()\n B2_D = (B2_Re_D+1j*B2_Im_D)\n\n plt.subplot2grid((7,2),(0,0))\n plt.bar(sp.arange(1,dim['alpha']+1,1),alpha.real,color='g')\n plt.title(\"$\\\\alpha$\",**font)\n plt.ylabel(\"Re$[\\\\alpha]$\",**font)\n\n plt.subplot2grid((7,2),(0,1))\n plt.bar(sp.arange(1,dim['alpha']+1,1),alpha.conj().imag,color='r')\n plt.title(\"$\\\\alpha$\",**font)\n plt.ylabel(\"Im$[\\\\alpha]$\",**font)\n\n plt.subplot2grid((7,2),(1,0),rowspan=2)\n plt.ylabel(\"Re$[B_{\\\\omega}(t)]$\",**font)\n plt.plot(omega[:],B2_U[:].real,label=\"$B^{1}_{\\\\omega}(T_2)$\", linewidth=2.0, color='g')\n plt.plot(omega[:],B1_U[:].real,label=\"$\\\\beta^*\\cdot B^{1}_{\\\\omega}(T_1)$\", linewidth=2.0, color=\"black\")\n plt.legend(**font)\n\n plt.subplot2grid((7,2),(3,0),rowspan=2)\n plt.ylabel(\"Re$[B_{\\\\omega}(t)]$\",**font)\n plt.plot(omega[:],B2_D[:].real,label=\"$B^{0}_{\\\\omega}(T_2)$\", linewidth=2.0, color='g')\n plt.plot(omega[:],B1_D[:].real,label=\"$\\\\beta^*\\cdot B^{0}_{\\\\omega}(T_1)$\", linewidth=2.0, color=\"black\")\n plt.legend(**font)\n plt.xlabel(\"$\\\\omega/\\\\omega_c$\",**font)\n\n plt.subplot2grid((7,2),(1,1),rowspan=2)\n plt.plot(omega[:],B2_U[:].imag,label=\"$B^{1}_{\\\\omega}(T_2)$\", linewidth=2.0, color='r')\n 
plt.plot(omega[:],B1_U[:].imag,label=\"$\\\\beta^*\\cdot B^{1}_{\\\\omega}(T_1)$\", linewidth=2.0, color=\"black\")\n plt.legend(**font)\n plt.ylabel(\"Im$[B_{\\\\omega}(t)]$\",**font)\n\n plt.subplot2grid((7,2),(3,1),rowspan=2)\n plt.plot(omega[:],B2_D[:].imag,label=\"$B^{0}_{\\\\omega}(T_2)$\", linewidth=2.0, color='r')\n plt.plot(omega[:],B1_D[:].imag,label=\"$\\\\beta^*\\cdot B^{0}_{\\\\omega}(T_1)$\", linewidth=2.0, color=\"black\")\n plt.legend(**font)\n plt.ylabel(\"Im$[B_{\\\\omega}(t)]$\",**font)\n plt.xlabel(\"$\\\\omega/\\\\omega_c$\",**font)\n\n plt.subplot2grid((7,2),(5,0),colspan=2,rowspan=2)\n plt.plot(time[:],cavityMode_U[:],linewidth=2.0,color=\"red\")\n plt.plot(time[:],cavityMode_D[:],linewidth=2.0,color=\"blue\")\n plt.plot([cut1,cut1],[0,cavityMax*1.1],linewidth=2.0,color=\"black\")\n plt.plot([cut2,cut2],[0,cavityMax*1.1],linewidth=2.0,color=\"green\")\n plt.plot([cut3,cut3],[0,cavityMax*1.1],linewidth=2.0,color=\"red\")\n plt.ylabel(\"$|A(t)|^2$\",**font)\n plt.ylim((0,cavityMax*1.1))\n plt.xlabel(\"$t$ in n.s.\",**font)\n plt.xlim((min(time[:]),max(time[:])))\n# plt.xlim((0,270))\n\n plt.show()\n\n\n##########################################################################################\n### main routine #########################################################################\ndef main_routine (wd=\"./\",cfg=\"./python/parameter.cfg\",generationType=\"p\",\\\n cut1=51.4,cut2=102.8,cut3=154.0,\\\n toMinimize =2,\\\n cavityMatch=1,\\\n silent=0,\\\n useBeta=0,\\\n initialize=None,\\\n varMax=1000):\n print \"#################################################################\"\n print \"#################################################################\"\n print \"### optimal control #############################################\"\n print \"### memory pulse evaluation #####################################\"\n print \"#################################################################\"\n print \"#################################################################\"\n\n ### globals for functional variation\n global cons, fun, dim, conf\n\n conf['toMinimize'] =toMinimize\n conf['cavityMatch']=cavityMatch\n conf['silent'] =silent\n if (useBeta == 0):\n conf['useBeta'] = False\n else:\n conf['useBeta'] = True\n\n\n ### globals for functional variation\n\n ### generate working environment ###\n print (\"### working directory: \" + wd)\n tmpDir = wd+\"tmp/\" \n cmd = \"mkdir -p \" + tmpDir\n call(cmd.split())\n ### generate working environment ###\n\n ### read config file ###\n print (\"### load config file: \" + cfg)\n configParser = cp.ConfigParser()\n configParser.read(cfg)\n print (configParser.sections())\n cfg=configParser.__dict__['_sections'].copy() \n name_readwrite=IOHelper.getNameReadWrite(**cfg) \n name_storage =IOHelper.getNameStorage (**cfg) \n name_vector =IOHelper.getVectorMemory (**cfg)\n myTime =IOHelper.functionaltimes_readwrite(**cfg) # reads time and updates cfg: \n # cfg['METime']['{fidelity_ti}'] = myTime['idx_ti']\n # cfg['METime']['{fidelity_tf}'] = myTime['idx_tf']\n\n conf['MEConstraints'] = cfg['MEConstraints'].copy()\n conf['FITNESS']= cfg['FITNESS'].copy()\n conf['FITNESS']['{mutationrate}']=sp.zeros([conf['entries']])\n conf['FITNESS']['{mutationrate}'][conf['funval']] =cfg['FITNESS']['{mut_functional}']\n conf['FITNESS']['{mutationrate}'][conf['fidelity_down']]=cfg['FITNESS']['{mut_fidelity_down}']\n conf['FITNESS']['{mutationrate}'][conf['fidelity_up']] =cfg['FITNESS']['{mut_fidelity_up}']\n conf['FITNESS']['{mutationrate}'][conf['memolap']] 
=cfg['FITNESS']['{mut_memolap}']\n    conf['FITNESS']['{mutationrate}'][conf['alpha']]         =cfg['FITNESS']['{mut_alpha}']\n    conf['FITNESS']['{mutationrate}'][conf['beta']]          =cfg['FITNESS']['{mut_beta}']\n    conf['FITNESS']['{mutationrate}'][conf['success']]       =cfg['FITNESS']['{mut_success}']\n\n    cons['alpha_norm']=float(cfg['MEConstraints'][\"{storage_amplitude}\"])\n    cons['beta_low']  =float(cfg['MEConstraints'][\"{limit_low_beta}\"])\n    cons['beta_top']  =float(cfg['MEConstraints'][\"{limit_top_beta}\"])\n    cons['chi2_Tol']  =float(cfg['MEConstraints']['{tol_chi2}'])\n\n    dim['alpha']=int(cfg['MEFourier']['{storage_harmonic}'])\n    dim['total']=dim['alpha']+3\n\n    ### prepare and compile fortran routines ###\n    print (\"### prepare fortran routines\")\n    replace_in_file('./python/py.parNvCenter.F95'   , tmpDir +'parNvCenter.F95'   , **cfg['NVSETUP'])\n    replace_in_file('./python/py.parMemoryPulse.F95', tmpDir +'parMemoryPulse.F95', **cfg['MEFourier'])\n    replace_in_file(tmpDir +'parMemoryPulse.F95'    , tmpDir +'parMemoryPulse.F95', **cfg['OCFourier'])\n    replace_in_file(tmpDir +'parMemoryPulse.F95'    , tmpDir +'parMemoryPulse.F95', **cfg['MESpin'])\n    replace_in_file(tmpDir +'parMemoryPulse.F95'    , tmpDir +'parMemoryPulse.F95', **cfg['MEConstraints'])\n    replace_in_file(tmpDir +'parMemoryPulse.F95'    , tmpDir +'parMemoryPulse.F95', **cfg['OCConstraints'])\n    replace_in_file(tmpDir +'parMemoryPulse.F95'    , tmpDir +'parMemoryPulse.F95', **cfg['METime'])\n    replace_in_file(tmpDir +'parMemoryPulse.F95'    , tmpDir +'parMemoryPulse.F95', **cfg['OCTime'])\n    replace_in_file(tmpDir +'parMemoryPulse.F95'    , tmpDir +'parMemoryPulse.F95', **cfg['FILES'])\n\n    #write config file\n    with open(cfg['FILES']['{prefix}']+\"parameter.cfg\", 'wb') as configfile:\n        configParser.write(configfile)\n    ### read config file ###\n\n    cmd = \"mv \"+tmpDir+\"parMemoryPulse.F95 \"+wd+\"srcOptCntrl/parMemoryPulse.F95\"\n    call(cmd.split())\n    cmd = \"mv \"+tmpDir+\"parNvCenter.F95 \"+wd+\"srcNv/parNvCenter.F95\"\n    call(cmd.split())\n\n    print (\"### compile fortran routines\")\n    cmd = \"./scripts/ifort-memoryHarmonics.sh \" + wd\n    call(cmd.split())\n\n\n    if (generationType == \"p\"):\n        alphaR,alphaI = sp.loadtxt(name_vector).T\n        alpha=alphaR[:dim['alpha']]-1j*alphaI[:dim['alpha']] # re - i*im is correct since I store alpha.conj() !\n        beta =alphaR[-1] - 1j*alphaI[-1]\n        functionalPlot (alpha,beta,cut1,cut2,cut3,wd,**cfg)\n    else:\n\n        print (\"### invoke fortran routines\")\n        print (\"### generation Type: \" + generationType)\n        cmd = wd+\"memoryHarmonics\" # location of executable fortran program\n        generateHarmonics = Popen(cmd.split(), stdin=PIPE) # run fortran program with piped standard input\n        cmd = \"echo \" + generationType # communication with fortran-routine: choose action -> read or generate\n        generateInput  = Popen(cmd.split(), stdout=generateHarmonics.stdin) # send action to fortran program\n        output = generateHarmonics.communicate()[0]\n        generateInput.wait()\n    ### prepare and compile fortran routines ###\n\n    ### read data for functional variation ###\n    cons['cavityT2_down'], \\\n    cons['cavityT2_up']      = IOHelper.read_CavityMemory (**cfg['FILES'])\n\n    fun ['mtrxBeta_up'],   \\\n    cons['vecT2_up']   ,   \\\n    cons['fidelity_up'],   \\\n    cons['mtrxMemOlap']      = IOHelper.read_MtrxMemory(\"up\",   **cfg['FILES'])  \n\n    fun ['mtrxBeta_down'], \\\n    cons['vecT2_down']   , \\\n    cons['fidelity_down'], \\\n    __                       = IOHelper.read_MtrxMemory(\"down\", **cfg['FILES'])  \n\n    cons['hole']             = IOHelper.read_HoleData(storage=cfg['FILES']['{name_storage}'],**cfg['FILES'])  \n\n    cons['fidelity_Tol_Low'] = 
cfg['FITNESS']['{lim_fidelity_low}']\n    cons['fidelity_Tol_Top'] = cfg['FITNESS']['{lim_fidelity_top}']\n    ### read data for functional variation ###\n\n    ### functional variation ###\n    print (\"\\n### start minimization: \")\n\n    varStep = 0\n    exit = False\n    minfun = dict()\n    fitness = -1.0\n    if (silent == 0):\n        disp = True\n    else:\n        disp = False\n\n    if (initialize != None):\n        cfg['dim_grid'] =varMax\n        name_varinit    =IOHelper.getNameInitialFittest(**cfg)\n        rawAplha0 = sp.loadtxt(name_varinit)+0j\n        varMax=len(rawAplha0)\n\n        if (disp):\n            print \"## initialize from file: \" + name_varinit\n            print \"## testing {:} initial conditions\".format(varMax)\n\n    while varStep < varMax and not exit :\n        if (initialize == \"spherical\"):\n            print (\"### initialize spherical variational vector {:}\".format(varStep))\n            alpha0=rawAplha0[varStep]\n        else:\n            try:\n                alpha0=rawAplha0[int(initialize)]\n                exit=True\n                if (disp):\n                    print (\"### initialize variational vector {:}\".format(int(initialize)))\n            except:\n                if (disp):\n                    print (\"### initialize variational vector: random\")\n                alpha0=sp.random.random_sample([dim['alpha']])+sp.random.random_sample([dim['alpha']])*1j\n\n        alpha0[:]=alpha0[:]/la.norm(alpha0[:])*cons['alpha_norm'] # normalize coefficients for alpha -> defines net-power\n        res,fitRes=MemoryPulseFunctional.evaluateFunctional(alpha0,0.5+0j)\n\n        if res[conf['fitness']] > fitness:\n            fitness = res[conf['fitness']]\n\n            if (fitness > 0.0): \n                print \"### acquired good fitness !!! \"\n                print \"### save coefficient vector alpha and phase beta\"\n\n                output=sp.zeros([dim['alpha']+1],complex)\n                output[:dim['alpha']]=res[conf[\"alpha\"]]\n                output[-1]=res[conf[\"beta\"]]\n\n                sp.savetxt(name_vector,sp.array([output.real,output.conj().imag]).T) # [real(gamma), imag(gamma)].conj()\n                sp.savetxt(IOHelper.getFitnessMemory(**cfg),fitRes,header='fitness; success; norm[alpha]; abs[beta]; memolap; fidelity_up; fidelity_down; minfun')\n                if (initialize == \"spherical\"):\n                    myname = name_vector+\"{:4}\".format(varStep)\n                else:\n                    myname = name_vector\n                sp.savetxt(myname,sp.array([output.real,output.conj().imag]).T) # [real(gamma), imag(gamma)].conj()\n\n                print \"### done with minimization\"\n                if (disp):\n                    functionalPlot(res[conf['alpha']],res[conf['beta']],cut1,cut2,cut3,wd,**cfg)\n                else:\n                    cmd=wd+\"memoryOptimized\"\n                    call(cmd.split())\n\n                exit=True\n\n                if (initialize == \"spherical\"):\n                    exit=False\n                    if (fitness == 0.0):\n                        print \"### did not acquire good fitness for spherical initialization !!! 
\"\n fitness=-1.0\n \n if (varStep == 0 or minfun[conf[\"funval\"]]>res[conf[\"funval\"]]):\n minfun=res\n\n varStep+=1\n\n if initialize != \"spherical\" and not exit:\n print \"\\n### done with minimization without solution\"\n\n print \"#################################################################\"\n print \"#################################################################\"\n\n### main routine #########################################################################\n##########################################################################################\n\n\nif __name__ == '__main__':\n argh.dispatch_command(main_routine) \n\n" }, { "alpha_fraction": 0.7574370503425598, "alphanum_fraction": 0.7940503358840942, "avg_line_length": 53.5, "blob_id": "36dd95ed2f73ce29d9a6b933109bbc3ada524d87", "content_id": "c371d7d7cd663a23110020592b7ef1f68fc840ee", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 437, "license_type": "no_license", "max_line_length": 186, "num_lines": 8, "path": "/scripts/parallel-ifort-generateHarmonics.sh", "repo_name": "bhartl/optimal-control", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\nrm -f generateHarmonics\nrm -f generateOptimized\n\nifort -heap-arrays ./srcRand/MersenneTwister.F90 -Tf ./srcNv/modNvCenter.F95 -Tf ./srcOptCntrl/modSmallestOverlap.F95 -Tf ./srcMain/main_generateHarmonics.F95 -free -o generateHarmonics \n\nifort -heap-arrays ./srcRand/MersenneTwister.F90 -Tf ./srcNv/modNvCenter.F95 -Tf ./srcOptCntrl/modSmallestOverlap.F95 -Tf ./srcMain/main_generateOptimized.F95 -free -o generateOptimized \n" }, { "alpha_fraction": 0.7052810788154602, "alphanum_fraction": 0.7597955465316772, "avg_line_length": 57.599998474121094, "blob_id": "d04a6a6a86aa578bd2dd953956819c862c85267c", "content_id": "2b86eca415492cf7ffa557100483ecefd7a0be37", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 587, "license_type": "no_license", "max_line_length": 173, "num_lines": 10, "path": "/scripts/ifort-superimposeHarmonics.sh", "repo_name": "bhartl/optimal-control", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\nrm -f \"$1\"generateHarmonics\nrm -f \"$1\"generateOptimized\n\nifort -heap-arrays -Tf \"$1\"srcNv/modNvCenter.F95 -Tf \"$1\"srcOptCntrl/modMemoryPulse.F95 -Tf \"$1\"srcMain/main_memoryTimedelaySuperimposed.F95 -free -o \"$1\"delaySuperimposed \n\nifort -heap-arrays -Tf \"$1\"srcNv/modNvCenter.F95 -Tf \"$1\"srcOptCntrl/modMemoryPulse.F95 -Tf \"$1\"srcMain/main_memorySuperimposed.F95 -free -o \"$1\"memorySuperimposed \n\n#ifort -heap-arrays -Tf \"$1\"srcNv/modNvCenter.F95 -Tf \"$1\"srcOptCntrl/modSmallestOverlap.F95 -Tf \"$1\"srcMain/main_generateSuperimposed.F95 -free -o \"$1\"generateSuperimposed \n" }, { "alpha_fraction": 0.6771300435066223, "alphanum_fraction": 0.7399103045463562, "avg_line_length": 43.400001525878906, "blob_id": "d2bed47c0b3b9e175b96797d8f5256fdaf0fa87e", "content_id": "d0ca3198832d3dbb3fbeec30ffb923629053ba41", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 223, "license_type": "no_license", "max_line_length": 181, "num_lines": 5, "path": "/scripts/ifort-memoryCheck.sh", "repo_name": "bhartl/optimal-control", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\nrm -f \"$1\"memoryHarmonics\n\nifort -heap-arrays \"$1\"srcRand/MersenneTwister.F90 -Tf \"$1\"srcNv/modNvCenter.F95 -Tf \"$1\"srcOptCntrl/modMemoryPulse.F95 -Tf \"$1\"srcMain/main_memoryCheck.F95 -free -o 
\"$1\"memoryCheck\n\n" }, { "alpha_fraction": 0.6984401941299438, "alphanum_fraction": 0.7556325793266296, "avg_line_length": 51.45454406738281, "blob_id": "ef9b01c9cfc5a07b12f18049f98d3c1e13974779", "content_id": "c1f38608392a79387ae90866a15d4341315d26f8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 577, "license_type": "no_license", "max_line_length": 163, "num_lines": 11, "path": "/scripts/ifort-memoryHarmonics.sh", "repo_name": "bhartl/optimal-control", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\nrm -f \"$1\"memoryHarmonics\nrm -f \"$1\"memoryOptimized\nrm -f \"$1\"memorySuperimposed\n\nifort -heap-arrays -Tf \"$1\"srcNv/modNvCenter.F95 -Tf \"$1\"srcOptCntrl/modMemoryPulse.F95 -Tf \"$1\"srcMain/main_memoryHarmonics.F95 -free -o \"$1\"memoryHarmonics\n\nifort -heap-arrays -Tf \"$1\"srcNv/modNvCenter.F95 -Tf \"$1\"srcOptCntrl/modMemoryPulse.F95 -Tf \"$1\"srcMain/main_memoryOptimized.F95 -free -o \"$1\"memoryOptimized\n\nifort -heap-arrays -Tf \"$1\"srcNv/modNvCenter.F95 -Tf \"$1\"srcOptCntrl/modMemoryPulse.F95 -Tf \"$1\"srcMain/main_memorySuperimposed.F95 -free -o \"$1\"memorySuperimposed\n" }, { "alpha_fraction": 0.547105073928833, "alphanum_fraction": 0.5827019214630127, "avg_line_length": 41.8834342956543, "blob_id": "89523f0bbe7572f3ba3a276a06ddeb8eb3d2639d", "content_id": "2e5489ea3f3ce1a9e90b15dddc1a0f54f656b468", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6997, "license_type": "no_license", "max_line_length": 143, "num_lines": 163, "path": "/python/SmallestOverlapPartialOverlap.py", "repo_name": "bhartl/optimal-control", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python2.7\n#-*- coding:utf-8 -*-\nimport argh\nimport os, re, sys\nimport ConfigParser as cp\nimport scipy as sp\nfrom scipy.integrate import cumtrapz\nfrom math import acos,asin\n\nfrom mpl_toolkits.mplot3d import axes3d\nfrom matplotlib import cm\nfrom matplotlib.ticker import LinearLocator, FormatStrFormatter\nimport matplotlib.pyplot as plt\n\nimport IOHelper\n\n\n##########################################################################################\n### main routine #########################################################################\n\n### check for arguments, g: generate data, r: read data, in both ways: generate matrices ###\ndef main_routine (wd=\"./\",cfg=\"./python/parameter.cfg\",thetaCnt=11,phiCnt=11,p3d=0,myMap=\"rainbow\"):\n\n ### read config file ###\n print (\"load from config file: \" + cfg)\n\n configParser = cp.ConfigParser()\n configParser.read(cfg)\n print (configParser.sections())\n cfg=configParser.__dict__['_sections'].copy() \n\n #for src, target in cfg['NVSETUP'].items():\n # print(src + \" : \" + target)\n omega_c = float(cfg['NVSETUP']['{omega_c}'])\n\n nWrite=int(cfg['OCFourier']['{write_harmonic}'])\n nRead =int(cfg['OCFourier']['{read_harmonic}'])\n nStore=int(cfg['MEFourier']['{storage_harmonic}'])\n\n nDown =nRead+nWrite\n nUp =nDown+nWrite\n ### read config file ###\n\n ### read data ### \n cavityWrite,cavityMemo,cavityRead =IOHelper.harmonics_readwrite(**cfg)\n time =IOHelper.functionaltimes_readwrite(**cfg)\n\n time['write'][:] *= 1e9\n time['read'][:] *= 1e9\n ti = int(time['idx_ti'])\n tf = int(time['idx_tf'])\n functime = time['read'][ti:tf] \n dt = float(time['delta_t'])\n\n filename =IOHelper.getVectorOverlap(**cfg)\n reGamma,imGamma=sp.loadtxt(filename).T \n alphaR =reGamma[0:nRead] -1j*imGamma[0:nRead] # 
/float(cfg['OCConstraints']['{amplitude_up}'])\n alphaD =reGamma[nRead:nDown]-1j*imGamma[nRead:nDown] # /float(cfg['OCConstraints']['{amplitude_down}'])\n alphaU =reGamma[nDown:nUp] -1j*imGamma[nDown:nUp] # /float(cfg['OCConstraints']['{amplitude_read}'])\n ### read data ###\n\n ### plotting\n Reg1Up = sp.dot(alphaU.conj(),cavityWrite)\n Reg1Down = sp.dot(alphaD.conj(),cavityWrite)\n\n Reg2Down = sp.dot(alphaD.conj(),cavityMemo)\n Reg2Up = sp.dot(alphaU.conj(),cavityMemo)\n Reg2Read = sp.dot(alphaR.conj(),cavityRead)\n\n Reg2DownRead = Reg2Down + Reg2Read\n Reg2UpRead = Reg2Up + Reg2Read\n\n FuncInfoOlap = sp.zeros([2,thetaCnt,phiCnt],complex)\n\n I00 = cumtrapz( (Reg2Down[ti:tf].conj() * Reg2Down[ti:tf]).real, x=None, dx=dt )[-1]\n I0R = 1j*cumtrapz( (Reg2Read[ti:tf].conj() * Reg2Down[ti:tf]).imag, x=None, dx=dt )[-1]\n I0R += cumtrapz( (Reg2Read[ti:tf].conj() * Reg2Down[ti:tf]).real, x=None, dx=dt )[-1]\n\n I11 = cumtrapz( (Reg2Up [ti:tf].conj() * Reg2Up [ti:tf]).real, x=None, dx=dt )[-1]\n I1R = 1j*cumtrapz( (Reg2Read[ti:tf].conj() * Reg2Up [ti:tf]).imag, x=None, dx=dt )[-1]\n I1R += cumtrapz( (Reg2Read[ti:tf].conj() * Reg2Up [ti:tf]).real, x=None, dx=dt )[-1]\n\n fs = 20\n\n for i in sp.arange(0.0,thetaCnt):\n theta = i/(thetaCnt-1.0) # from 0 to 1 * pi\n \n for j in sp.arange(0.0,phiCnt):\n phi = j/(phiCnt-1.0)*2.0 # from 0 to 2 * pi\n \n FuncCavity = sp.cos(theta*sp.pi)*Reg2Down[ti:tf] + sp.sin(theta*sp.pi)*sp.exp(-1j*phi*sp.pi)*Reg2Up[ti:tf]\n FuncCavity[:] += Reg2Read [ti:tf]\n\n# plt.title(\"state $\\\\theat={0:}$, $\\\\phi={1:}$\".format(theta,phi),fontsize=fs)\n# plt.plot(time['read'][ti:tf],sp.absolute(FuncCavity)**2,color=\"green\",linewidth=2)\n# plt.plot(time['read'][ti:tf],sp.absolute(Reg2DownRead[ti:tf])**2,color=\"blue\",linewidth=2)\n# plt.plot(time['read'][ti:tf],sp.absolute(Reg2UpRead[ti:tf])**2,color=\"red\",linewidth=2)\n# plt.ylabel(\"$A(t)$\",fontsize=fs)\n# plt.xlim([time['read'][ti],time['read'][tf]])\n# plt.show()\n\n FuncInfoOlap [0,i,j] = cumtrapz( (FuncCavity[:].conj() * Reg2Down[ti:tf]).real, x=None, dx=dt )[-1]\n FuncInfoOlap [0,i,j] += 1j*cumtrapz( (FuncCavity[:].conj() * Reg2Down[ti:tf]).imag, x=None, dx=dt )[-1]\n FuncInfoOlap [0,i,j] = (FuncInfoOlap [0,i,j]-I0R)/I00\n\n FuncInfoOlap [1,i,j] = cumtrapz( (FuncCavity[:].conj() * Reg2Up[ti:tf]).real, x=None, dx=dt )[-1]\n FuncInfoOlap [1,i,j] += 1j*cumtrapz( (FuncCavity[:].conj() * Reg2Up[ti:tf]).imag, x=None, dx=dt )[-1]\n FuncInfoOlap [1,i,j] = (FuncInfoOlap [1,i,j]-I1R)/I11\n\n xx, yy = sp.meshgrid(sp.linspace(0.0,2.0,phiCnt),sp.linspace(0.0,1.0,thetaCnt))\n\n zmin = 0.0\n zmax = +1.0\n\n zzOlapR0 = FuncInfoOlap [0,:,:].real\n zzOlapI0 = FuncInfoOlap [0,:,:].imag\n print zzOlapI0 \n\n zzOlapR1 = FuncInfoOlap [1,:,:].real\n zzOlapI1 = FuncInfoOlap [1,:,:].imag\n\n fig = plt.figure()\n\n fig1 = fig.add_subplot(221, projection='3d')\n fig1.plot_surface(xx, yy, zzOlapR0, rstride=5, cstride=5, cmap=myMap, alpha=0.5,zorder=11.0,vmin=zzOlapR0.min(), vmax=zzOlapR0.max())\n fig1.set_zlim(-1,1)\n fig1.set_title(\"a) Re$[(\\\\tilde O($\\\"$0$\\\"$)-\\\\tilde {\\cal I}_{0R})/\\\\tilde {\\cal I}_{00}]\\\\approx\\cos(\\\\theta)$\",fontsize=fs)\n fig1.set_ylabel(\"$\\\\theta/\\pi$\",fontsize=fs)\n fig1.set_xlabel(\"$\\phi / \\pi$\",fontsize=fs)\n\n fig1 = fig.add_subplot(223, projection='3d')\n fig1.plot_surface(xx, yy, zzOlapI0, rstride=5, cstride=5, cmap=myMap, alpha=0.5,zorder=11.0,vmin=zzOlapI0.min(), vmax=zzOlapI0.max())\n fig1.set_zlim(-0.01,0.01)\n fig1.set_title(\"c) Im$[(\\\\tilde O($\\\"$0$\\\"$)-\\\\tilde 
{\\cal I}_{0R})/\\\\tilde {\\cal I}_{00}]\\\\approx0$\",fontsize=fs)\n fig1.set_ylabel(\"$\\\\theta/\\pi$\",fontsize=fs)\n fig1.set_xlabel(\"$\\phi/\\pi$\",fontsize=fs)\n\n fig1 = fig.add_subplot(222, projection='3d')\n fig1.plot_surface(xx, yy, zzOlapR1, rstride=5, cstride=5, cmap=myMap, alpha=0.5,zorder=11.0,vmin=zzOlapR1.min(), vmax=zzOlapR1.max())\n fig1.set_zlim(-1,1)\n fig1.set_title(\"b) Re$[(\\\\tilde O($\\\"$1$\\\"$)-\\\\tilde {\\cal I}_{1R})/\\\\tilde {\\cal I}_{11}]\\\\approx\\sin(\\\\theta)\\cdot\\cos(\\phi)$\",fontsize=fs)\n fig1.set_ylabel(\"$\\\\theta/\\pi$\",fontsize=fs)\n fig1.set_xlabel(\"$\\phi / \\pi$\",fontsize=fs)\n\n fig1 = fig.add_subplot(224, projection='3d')\n fig1.plot_surface(xx, yy, zzOlapI1, rstride=5, cstride=5, cmap=myMap, alpha=0.5,zorder=11.0,vmin=zzOlapI1.min(), vmax=zzOlapI1.max())\n fig1.set_zlim(-1,1)\n# fig1.set_title(\"a) Re$[(O_{\\mathbb{C}}(\"0\")-\\\\tilde {\\¢al I}_{0R})/\\\\tilde {\\¢al I}_{00}]$\",fontsize=fs)\n fig1.set_title(\"d) Im$[(\\\\tilde O($\\\"$1$\\\"$)-\\\\tilde {\\cal I}_{1R})/\\\\tilde {\\cal I}_{11}]\\\\approx\\sin(\\\\theta)\\cdot\\sin(\\phi)$\",fontsize=fs)\n fig1.set_ylabel(\"$\\\\theta/\\pi$\",fontsize=fs)\n fig1.set_xlabel(\"$\\phi/\\pi$\",fontsize=fs)\n\n plt.show()\n\n ### plotting\n\n\n### main routine #########################################################################\n##########################################################################################\n\n\nif __name__ == '__main__':\n argh.dispatch_command(main_routine) \n\n\n\n" }, { "alpha_fraction": 0.6510416865348816, "alphanum_fraction": 0.71875, "avg_line_length": 37, "blob_id": "6928ed36255a8715f5869b15911c29b82e7e3dc9", "content_id": "3ea3d5b05909e26a04d8264abf50cde43feb073e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 192, "license_type": "no_license", "max_line_length": 151, "num_lines": 5, "path": "/scripts/ifort-checkBloch.sh~", "repo_name": "bhartl/optimal-control", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\nrm -f \"$1\"generateBloch\n\nifort -heap-arrays -Tf \"$1\"srcNv/modNvCenter.F95 -Tf \"$1\"srcOptCntrl/modMemoryPulse.F95.F95 -Tf \"$1\"srcMain/main_checkBloch.F95 -free -o \"$1\"checkBloch \n\n" }, { "alpha_fraction": 0.6318453550338745, "alphanum_fraction": 0.6613897681236267, "avg_line_length": 44.630252838134766, "blob_id": "6221f618461c5facd6fca4d8313234ac6d79888b", "content_id": "d8a1526e1c051ce7e025ed1d76bbc8cb82da7bd9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 10865, "license_type": "no_license", "max_line_length": 162, "num_lines": 238, "path": "/python/MemoryPulsePlot.py~", "repo_name": "bhartl/optimal-control", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python2.7\n#-*- coding:utf-8 -*-\nimport argh\nimport os, re, sys\nimport ConfigParser as cp\nimport scipy as sp\nimport matplotlib.pyplot as plt\nfrom subprocess import Popen, PIPE, call\n\nimport IOHelper\n\n\n##########################################################################################\n### main routine #########################################################################\ndef superimpose (cavityD,cavityU,par1,par2):\n return (sp.cos(par1*sp.pi)*cavityD[:] + 1j*sp.sin(par1*sp.pi)*cavityU[:])*sp.exp(-1j*par1*sp.pi)\n\ndef pltFunction (cavity1,cavity2,cavity3,plotType):\n if (plotType==0):\n return sp.absolute(cavity1[:])**2,sp.absolute(cavity2[:])**2,sp.absolute(cavity3[:])**2\n elif (plotType==1):\n return 
sp.real(cavity1[:]),sp.real(cavity2[:]),sp.real(cavity3[:])\n elif (plotType==2):\n return sp.imag(cavity1[:]),sp.imag(cavity2[:]),sp.imag(cavity3[:])\n else: \n return cavity1, cavity2, cavity3\n\n### check for arguments, g: generate data, r: read data, in both ways: generate matrices ###\ndef main_routine (wd=\"./\",cfg=\"./python/parameter.cfg\",start=-1,cut1=1000.0,cut2=1000.0,cut3=1000.0,stop=-1,test=0,plotType=0,impose=0,par1=0.0,par2=0.0):\n\n ### read config file ###\n print (\"load from config file: \" + cfg)\n\n configParser = cp.ConfigParser()\n configParser.read(cfg)\n print (configParser.sections())\n cfg=configParser.__dict__['_sections'].copy() \n\n #for src, target in cfg['NVSETUP'].items():\n # print(src + \" : \" + target)\n omega_c = float(cfg['NVSETUP']['{omega_c}'])\n\n nWrite=int(cfg['OCFourier']['{write_harmonic}'])\n nRead =int(cfg['OCFourier']['{read_harmonic}'])\n nStore=int(cfg['MEFourier']['{storage_harmonic}'])\n\n nDown =nRead+nWrite\n nUp =nDown+nWrite\n ### read config file ###\n\n ### read data ### \n cavityWrite,cavityMemo,cavityRead =IOHelper.harmonics_readwrite(**cfg)\n cavityDownStore,cavityDownRead =IOHelper.harmonics_storage(\"down\",**cfg)\n cavityUpStore ,cavityUpRead =IOHelper.harmonics_storage(\"up\",**cfg)\n time =IOHelper.time_storage(**cfg)\n\n time['write'][:] *= 1e9\n time['store'][:] *= 1e9\n time['read'] [:] *= 1e9\n ti = int(time['idx_ti'])\n tf = int(time['idx_tf'])\n functime = time['read'][ti:tf]\n\n filename =IOHelper.getVectorOverlap(**cfg)\n reGamma,imGamma=sp.loadtxt(filename).T \n alphaR =reGamma[0:nRead] -1j*imGamma[0:nRead]\n alphaD =reGamma[nRead:nDown]-1j*imGamma[nRead:nDown]\n alphaU =reGamma[nDown:nUp] -1j*imGamma[nDown:nUp]\n\n filename =IOHelper.getVectorMemory(**cfg)\n reGamma,imGamma=sp.loadtxt(filename).T #sp.random.random_sample([dimH])+0j\n alphaS =reGamma[0:nStore]-1j*imGamma[0:nStore]\n beta =reGamma[-1] -1j*imGamma[-1]\n ### read data ###\n\n ### plotting\n cavityDownReg1 = sp.dot(alphaD.conj(),cavityWrite)\n cavityDownReg2 = sp.dot(alphaS.conj(),cavityDownStore[0:nStore][:])+beta.conj()*cavityDownStore[-2][:]+cavityDownStore[-1][:]\n cavityDownReg3 = sp.dot(alphaS.conj(),cavityDownRead [0:nStore][:])+beta.conj()*cavityDownRead [-2][:]+cavityDownRead [-1][:]\n\n cavityUpReg1 = sp.dot(alphaU.conj(),cavityWrite)\n cavityUpReg2 = sp.dot(alphaS.conj(),cavityUpStore[0:nStore][:])+beta.conj()*cavityUpStore[-2][:]+cavityUpStore[-1][:]\n cavityUpReg3 = sp.dot(alphaS.conj(),cavityUpRead [0:nStore][:])+beta.conj()*cavityUpRead [-2][:]+cavityUpRead [-1][:]\n\n cavitySuperReg1 = superimpose(cavityDownReg1,cavityUpReg1,par1,par2)\n cavitySuperReg2 = superimpose(cavityDownReg2,cavityUpReg2,par1,par2)\n cavitySuperReg3 = superimpose(cavityDownReg3,cavityUpReg3,par1,par2)\n\n cavityCheckReg1 = superimpose(sp.dot(alphaD.conj(),cavityWrite),sp.dot(alphaU.conj(),cavityWrite),par1,par2)\n cavityCheckReg2 = sp.dot(alphaS.conj(),cavityUpStore[0:nStore][:])+beta.conj()*cavityUpStore[-2][:]+superimpose(cavityDownStore[-1],cavityUpStore[-1],par1,par2)\n cavityCheckReg3 = sp.dot(alphaS.conj(),cavityUpRead [0:nStore][:])+beta.conj()*cavityUpRead [-2][:]+superimpose(cavityDownRead[-1],cavityUpRead[-1],par1,par2)\n\n ReadDownReg2 = sp.dot(alphaD.conj(),cavityMemo) + sp.dot(alphaR.conj(),cavityRead)\n ReadUpReg2 = sp.dot(alphaU.conj(),cavityMemo) + sp.dot(alphaR.conj(),cavityRead)\n ReadSuperReg2 = superimpose(ReadDownReg2,ReadUpReg2,par1,par2)\n ReadDownReg2 *= beta.conj()\n ReadUpReg2 *= beta.conj()\n ReadSuperReg2 *= beta.conj()\n\n fs = 
20\n\n plt.subplot(4,3,1)\n plt.title(\"state '0', $A(t)$\",fontsize=fs)\n plt.plot(functime,ReadDownReg2[ti:tf].real,color=\"darkgray\",linewidth=2)\n plt.plot(functime,cavityDownReg3[ti:tf].real,color=\"blue\",linewidth=2)\n plt.xlim([min(functime),max(functime)])\n plt.ylabel(\"real part\",fontsize=fs)\n\n plt.subplot(4,3,4)\n plt.plot(functime,ReadDownReg2[ti:tf].imag,color=\"darkgray\",linewidth=2)\n plt.plot(functime,cavityDownReg3[ti:tf].imag,color=\"blue\",linewidth=2)\n plt.xlim([min(functime),max(functime)])\n plt.ylabel(\"imag part\",fontsize=fs)\n\n plt.subplot(4,3,2)\n plt.title(\"state '1', $A(t)$\",fontsize=fs)\n plt.plot(functime,ReadUpReg2[ti:tf].real,color=\"darkgray\",linewidth=2)\n plt.plot(functime,cavityUpReg3[ti:tf].real,color=\"red\",linewidth=2)\n plt.xlim([min(functime),max(functime)])\n\n plt.subplot(4,3,5)\n plt.plot(functime,ReadUpReg2[ti:tf].imag,color=\"darkgray\",linewidth=2)\n plt.plot(functime,cavityUpReg3[ti:tf].imag,color=\"red\",linewidth=2)\n plt.xlim([min(functime),max(functime)])\n\n plt.subplot(4,3,3)\n if (impose != 1):\n plt.title(\"$\\\\alpha_{S}$\",fontsize=fs)\n plt.bar(sp.arange(1,nStore+1,1),alphaS.real,color=\"green\")\n else :\n plt.title(\"superposition, $A(t)$\",fontsize=fs)\n plt.plot(functime,ReadSuperReg2[ti:tf].real,color=\"darkgray\",linewidth=2)\n plt.plot(functime,cavitySuperReg3[ti:tf].real,color=\"green\",linewidth=2)\n plt.xlim([min(functime),max(functime)])\n\n plt.subplot(4,3,6)\n if (impose != 1):\n plt.bar(sp.arange(1,nStore+1,1),alphaS.imag,color=\"green\")\n else :\n plt.plot(functime,ReadSuperReg2[ti:tf].imag,color=\"darkgray\",linewidth=2)\n plt.plot(functime,cavitySuperReg3[ti:tf].imag,color=\"green\",linewidth=2)\n plt.xlim([min(functime),max(functime)])\n\n\n plt.subplot2grid((4,3),(2,0),colspan=3,rowspan=2)\n\n cavityCheckReg1,cavityCheckReg2,cavityCheckReg3 = pltFunction(cavityCheckReg1,cavityCheckReg2,cavityCheckReg3,plotType)\n cavitySuperReg1,cavitySuperReg2,cavitySuperReg3 = pltFunction(cavitySuperReg1,cavitySuperReg2,cavitySuperReg3,plotType)\n cavityDownReg1, cavityDownReg2, cavityDownReg3 = pltFunction(cavityDownReg1, cavityDownReg2, cavityDownReg3, plotType)\n cavityUpReg1, cavityUpReg2, cavityUpReg3 = pltFunction(cavityUpReg1, cavityUpReg2, cavityUpReg3, plotType)\n\n if (test==1):\n print (\"### compile fortran routines\")\n cmd = \"./scripts/ifort-memoryHarmonics.sh \" + wd\n call(cmd.split())\n\n print (\"### call memoryOptimized\")\n cmd=wd+\"memoryOptimized\"\n call(cmd.split())\n\n print (\"### call memorySuperimposed\")\n cmd=wd+\"memorySuperimposed\"\n generateSuperposition = Popen(cmd.split(), stdin=PIPE) # run fortran program with piped standard input\n cmd = \"echo {:}\".format(par1) # communication with fortran-routine: chose superposition parameter\n generateInput = Popen(cmd.split(), stdout=generateSuperposition.stdin) # send action to fortran program\n output = generateSuperposition.communicate()[0]\n generateInput.wait()\n\n filename = cfg['FILES']['{prefix}']+cfg['FILES']['{name_readwrite}']+\\\n cfg['FILES']['{name_storage}']+cfg['FILES']['{name_optimized}']\n\n mytimeU,__,A_Re_U,A_Im_U = sp.loadtxt(filename+\"cavity_up_stored\" +cfg['FILES']['{postfix}']).T\n mytimeD,__,A_Re_D,A_Im_D = sp.loadtxt(filename+\"cavity_down_stored\" +cfg['FILES']['{postfix}']).T\n mytimeS,__,A_Re_S,A_Im_S = sp.loadtxt(filename+\"cavity_super_stored\"+cfg['FILES']['{postfix}']).T\n\n cavityMode_U,cavityMode_D,cavityMode_S = pltFunction(A_Re_U+1j*A_Im_U, A_Re_D+1j*A_Im_D, A_Re_S+1j*A_Im_S,plotType)\n\n 
cavityMaxDown = max(max(cavityDownReg1),max(cavityDownReg2),max(cavityDownReg3))\n    cavityMaxUp   = max(max(cavityUpReg1),max(cavityUpReg2),max(cavityUpReg3))\n    cavityMaxSuper= max(max(cavitySuperReg1),max(cavitySuperReg2),max(cavitySuperReg3))\n    cavityMax     = max(cavityMaxDown,cavityMaxUp,cavityMaxSuper)\n\n    cavityMinDown = min(min(cavityDownReg1),min(cavityDownReg2),min(cavityDownReg3))\n    cavityMinUp   = min(min(cavityUpReg1),min(cavityUpReg2),min(cavityUpReg3))\n    cavityMinSuper= min(min(cavitySuperReg1),min(cavitySuperReg2),min(cavitySuperReg3))\n    cavityMin     = min(cavityMinDown,cavityMinUp,cavityMinSuper)\n\n    plt.plot(time['write'],cavityDownReg1,color=\"blue\",linewidth=2)\n    plt.plot(time['store'],cavityDownReg2,color=\"blue\",linewidth=2)\n    plt.plot(time['read'] ,cavityDownReg3,color=\"blue\",linewidth=2,label=\"state '0'\")\n\n    plt.plot(time['write'],cavityUpReg1,color=\"red\",linewidth=2)\n    plt.plot(time['store'],cavityUpReg2,color=\"red\",linewidth=2)\n    plt.plot(time['read'] ,cavityUpReg3,color=\"red\",linewidth=2,label=\"state '1'\")\n\n    if (impose == 1):\n        plt.plot(time['write'],cavitySuperReg1,color=\"green\",linewidth=2)\n        plt.plot(time['store'],cavitySuperReg2,color=\"green\",linewidth=2)\n        plt.plot(time['read'] ,cavitySuperReg3,color=\"green\",linewidth=2,label=\"super state\")\n\n    if (test == 1):\n        plt.plot(mytimeD,cavityMode_D,label=\"state '0' test\",color=\"cyan\")\n        plt.plot(mytimeU,cavityMode_U,label=\"state '1' test\",color=\"magenta\")\n        if (impose == 1):\n            plt.plot(time['write'],cavityCheckReg1,color=\"orange\")\n            plt.plot(time['store'],cavityCheckReg2,color=\"orange\")\n            plt.plot(time['read'] ,cavityCheckReg3,color=\"orange\",label=\"super state\")\n            plt.plot(mytimeS,cavityMode_S,label=\"super state test\",color=\"brown\")\n\n#    plt.legend()\n    plt.xlabel('time in ns',fontsize=fs)\n    plt.ylabel('$|A(t)|^2$',fontsize=fs)\n\n    \n    if start != -1 and stop != -1:\n        plt.xlim([start,stop])\n    else:\n        plt.xlim([min(time['write']),max(time['read'])])\n\n    plt.ylim([cavityMin*1.1,cavityMax*1.1])\n\n    plt.fill_between(functime, cavityMax*1.1, cavityMin*1.1, color='lightgray', facecolor='lightgray', alpha=0.5)\n    plt.plot([time['store'][0],time['store'][0]], [cavityMin*1.1,cavityMax*1.1], 'k--')\n    plt.plot([time['read'] [0],time['read'] [0]], [cavityMin*1.1,cavityMax*1.1], 'k--')\n\n    plt.plot([cut1,cut1],[0,cavityMax*1.1],linewidth=2.0)\n    plt.plot([cut2,cut2],[0,cavityMax*1.1],linewidth=2.0)\n    plt.plot([cut3,cut3],[0,cavityMax*1.1],linewidth=2.0)\n    plt.show()\n    ### plotting\n\n\n### main routine #########################################################################\n##########################################################################################\n\n\nif __name__ == '__main__':\n    argh.dispatch_command(main_routine) \n\n\n\n" }, { "alpha_fraction": 0.5660331845283508, "alphanum_fraction": 0.6065672636032104, "avg_line_length": 38.94711685180664, "blob_id": "220d31629f756e224ec1ef8e345c10d213c54a7c", "content_id": "57d72a9ddd3af67f33e4f634dda162408e90e048", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8314, "license_type": "no_license", "max_line_length": 125, "num_lines": 208, "path": "/python/SmallestOverlapPhasevariation.py", "repo_name": "bhartl/optimal-control", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python2.7\n#-*- coding:utf-8 -*-\nimport argh\nimport os, re, sys\nimport ConfigParser as cp\nimport scipy as sp\nfrom scipy.integrate import cumtrapz\nfrom math import 
acos,asin\n\nfrom mpl_toolkits.mplot3d import axes3d\nfrom matplotlib import cm\nfrom matplotlib.ticker import LinearLocator, FormatStrFormatter\nimport matplotlib.pyplot as plt\n\nimport IOHelper\n\n\n##########################################################################################\n### main routine #########################################################################\ndef superimpose (cavityD,cavityU,par1,par2):\n if (par2 == 0) :\n return (sp.cos(par1*sp.pi)*cavityD[:] + 1j*sp.sin(par1*sp.pi)*cavityU[:])*sp.exp(-1j*par1*sp.pi)\n elif (par2 == 1) :\n return sp.exp(-1j*par1*sp.pi)*cavityD\n else :\n return cavityD\n\ndef pltFunction (cavity1,cavity2,cavity3,plotType):\n if (plotType==0):\n return sp.absolute(cavity1[:])**2,sp.absolute(cavity2[:])**2,sp.absolute(cavity3[:])**2\n elif (plotType==1):\n return sp.real(cavity1[:]),sp.real(cavity2[:]),sp.real(cavity3[:])\n elif (plotType==2):\n return sp.imag(cavity1[:]),sp.imag(cavity2[:]),sp.imag(cavity3[:])\n else: \n return cavity1, cavity2, cavity3\n\n### check for arguments, g: generate data, r: read data, in both ways: generate matrices ###\ndef main_routine (wd=\"./\",cfg=\"./python/parameter.cfg\",cnt=11,sptype=1,myCmap=\"RdYlBu\"):\n\n ### read config file ###\n print (\"load from config file: \" + cfg)\n\n configParser = cp.ConfigParser()\n configParser.read(cfg)\n print (configParser.sections())\n cfg=configParser.__dict__['_sections'].copy() \n\n #for src, target in cfg['NVSETUP'].items():\n # print(src + \" : \" + target)\n omega_c = float(cfg['NVSETUP']['{omega_c}'])\n\n nWrite=int(cfg['OCFourier']['{write_harmonic}'])\n nRead =int(cfg['OCFourier']['{read_harmonic}'])\n nStore=int(cfg['MEFourier']['{storage_harmonic}'])\n\n nDown =nRead+nWrite\n nUp =nDown+nWrite\n ### read config file ###\n\n ### read data ### \n cavityWrite,cavityMemo,cavityRead =IOHelper.harmonics_readwrite(**cfg)\n time =IOHelper.functionaltimes_readwrite(**cfg)\n\n time['write'][:] *= 1e9\n time['read'][:] *= 1e9\n ti = int(time['idx_ti'])\n tf = int(time['idx_tf'])\n functime = time['read'][ti:tf] \n dt = float(time['delta_t'])\n\n filename =IOHelper.getVectorOverlap(**cfg)\n reGamma,imGamma=sp.loadtxt(filename).T \n alphaR =reGamma[0:nRead] -1j*imGamma[0:nRead]\n alphaD =reGamma[nRead:nDown]-1j*imGamma[nRead:nDown]\n alphaU =reGamma[nDown:nUp] -1j*imGamma[nDown:nUp]\n ### read data ###\n\n ### plotting\n Reg1Up = sp.dot(alphaU.conj(),cavityWrite)\n Reg1Down = sp.dot(alphaD.conj(),cavityWrite)\n\n Reg2Down = sp.dot(alphaD.conj(),cavityMemo)\n Reg2Up = sp.dot(alphaU.conj(),cavityMemo)\n Reg2Read = sp.dot(alphaR.conj(),cavityRead)\n\n Reg2DownRead = Reg2Down + Reg2Read\n Reg2UpRead = Reg2Up + Reg2Read\n\n FuncInfo = sp.zeros([cnt,2])\n FuncInfoPhase = sp.zeros([cnt,2])\n FuncSuperRead = sp.zeros([cnt,functime.size],complex)\n\n denom = sp.zeros([2])\n denom[0] = cumtrapz( sp.absolute(Reg2DownRead[ti:tf])**2, x=None, dx=dt )[-1] \n denom[1] = cumtrapz( sp.absolute(Reg2UpRead[ti:tf])**2, x=None, dx=dt )[-1] \n \n for i in sp.arange(0.0,cnt):\n phi = i*2.0/(cnt-1.0)\n\n Reg1Super = superimpose(Reg1Down,Reg1Up,phi,sptype)\n Reg2Super = superimpose(Reg2Down,Reg2Up,phi,sptype)\n Reg2SuperRead = Reg2Super + Reg2Read\n\n FuncSuperRead[i,:] = Reg2SuperRead[ti:tf]\n \n FuncInfoIntegrand = sp.absolute(FuncSuperRead[i,:]) * sp.absolute(Reg2DownRead[ti:tf])\n FuncInfo[i,0] = cumtrapz( FuncInfoIntegrand, x=None, dx=dt )[-1]/denom[0]\n \n FuncInfoIntegrand = sp.absolute(FuncSuperRead[i,:]) * sp.absolute(Reg2UpRead[ti:tf])\n FuncInfo[i,1] = cumtrapz( 
FuncInfoIntegrand, x=None, dx=dt )[-1]/denom[1]\n\n FuncInfoIntegrand = FuncSuperRead[i,:].conj() * Reg2UpRead[ti:tf]\n FuncInfoPhase[i,0] = cumtrapz( FuncInfoIntegrand.real, x=None, dx=dt )[-1]/denom[1]\n FuncInfoPhase[i,1] = cumtrapz( FuncInfoIntegrand.imag, x=None, dx=dt )[-1]/denom[1]\n\n\n xx, yy = sp.meshgrid(functime,sp.linspace(0.0,2.0,cnt))\n\n zzR = FuncSuperRead.real/FuncSuperRead.real.max()\n zzI = FuncSuperRead.imag/FuncSuperRead.imag.max()\n zzA = sp.absolute(FuncSuperRead)**2/((sp.absolute(FuncSuperRead)**2).max())\n\n zmin = -1.5\n zmax = +1.0\n fs = 20\n\n cm = plt.cm.get_cmap(myCmap)\n myColors = cm(zzR)\n\n fig = plt.figure()\n fig0 = fig.add_subplot(231, projection='3d')\n fig0.plot_surface(xx, yy, zzA, rstride=10, cstride=5, cmap=cm, alpha=0.5,zorder=11.0,vmin=-1, vmax=1)\n fig0.contourf(xx, yy, zzA, zdir='z', offset=zmin, cmap=cm, vmin=-1, vmax=1,zorder=1.0)\n fig0.set_zlim(zmin,zmax)\n fig0.set_title(\"a) normalized $|A_0(t;\\phi_0)|^2$\",fontsize=fs)\n fig0.set_xlabel(\"$t$ in ns\",fontsize=fs)\n fig0.set_ylabel(\"$\\phi_0/\\pi$\",fontsize=fs)\n\n fig1 = fig.add_subplot(232, projection='3d')\n fig1.plot_surface(xx, yy, zzR, rstride=10, cstride=5, cmap=cm, alpha=0.5,zorder=11.0,vmin=-1, vmax=1)\n fig1.contourf(xx, yy, zzR, zdir='z', offset=zmin, cmap=cm, vmin=-1, vmax=1,zorder=1.0)\n fig1.set_zlim(zmin,zmax)\n fig1.set_title(\"b) normalized $Re[\\,A_0(t;\\phi_0)\\,]$\",fontsize=fs)\n fig1.set_xlabel(\"$t$ in ns\",fontsize=fs)\n fig1.set_ylabel(\"$\\phi_0/\\pi$\",fontsize=fs)\n\n fig2 = fig.add_subplot(233, projection='3d')\n fig2.plot_surface(xx, yy, zzI, rstride=10, cstride=5, cmap=cm, alpha=0.5,zorder=11.0,vmin=-1, vmax=1)\n fig2.contourf(xx, yy, zzI, zdir='z', offset=zmin, cmap=cm, vmin=-1, vmax=1,zorder=1.0)\n fig2.set_zlim(zmin,zmax)\n fig2.set_title(\"c) normalized $Im[\\,A_0(t;\\phi_0)\\,]$\",fontsize=fs)\n fig2.set_xlabel(\"$t$ in ns\",fontsize=fs)\n fig2.set_ylabel(\"$\\phi_0/\\pi$\",fontsize=fs)\n\n plt.subplot2grid((2,3),(1,0),colspan=1,rowspan=1)\n plt.plot(sp.linspace(0.0,2.0,cnt),FuncInfo[:,0],label=\"overlap with $i=$'$0$'\",linewidth=\"2\",color=\"blue\")\n plt.plot(sp.linspace(0.0,2.0,cnt),FuncInfo[:,1],label=\"overlap with $i=$'$1$'\",linewidth=\"2\",color=\"red\")\n plt.ylim(0,1.1)\n plt.xlabel(\"$\\phi_0/\\pi$\",fontsize=fs)\n plt.legend(bbox_to_anchor=(0.52, 0.0), loc=3, borderaxespad=0.)\n plt.title(\"d) classical overlap, $O_{\\mathbb{R}}(i)$\",fontsize=fs)\n# plt.title(\"$\\\\frac{1}{N}\\int_{T_{{\\cal F}1}}^{T_{{\\cal F}2}}dt\\,|A_0(t;\\phi_0)|\\cdot|A_i(t)|$\",fontsize=fs)\n\n# FuncInfoPhase[:,0]-=min(FuncInfoPhase[:,0])\n# FuncInfoPhase[:,0]/=0.5*max(sp.absolute(FuncInfoPhase[:,0]))\n# FuncInfoPhase[:,0]-=1.0\n\n# FuncInfoPhase[:,1]-=min(FuncInfoPhase[:,1])\n# FuncInfoPhase[:,1]/=0.5*max(sp.absolute(FuncInfoPhase[:,1]))\n# FuncInfoPhase[:,1]-=1.0\n\n plt.subplot2grid((2,3),(1,1),colspan=1,rowspan=1)\n plt.plot(sp.linspace(0.0,2.0,cnt),FuncInfoPhase[:,0],label=\"Re[ $O_{\\mathbb{C}}($'$1$'$)$ ]\",linewidth=\"2\",color=\"red\")\n plt.plot(sp.linspace(0.0,2.0,cnt),FuncInfoPhase[:,1],label=\"Im[ $O_{\\mathbb{C}}($'$1$'$)$ ]\",linewidth=\"2\",color=\"magenta\")\n plt.title(\"e) complex overlap, $O_{\\mathbb{C}}($'$1$'$)$\",fontsize=fs)\n# plt.title(\"$\\\\frac{1}{N}\\int_{T_{{\\cal F}1}}^{T_{{\\cal F}2}}dt\\,A_0(t;\\phi_0)^*\\cdot A_1(t)$\",fontsize=fs)\n plt.xlabel(\"$\\phi_0/\\pi$\",fontsize=fs)\n plt.legend(bbox_to_anchor=(0.66, 0.0), loc=3, borderaxespad=0.)\n plt.ylim(-1,1.1)\n\n FuncInfoPhase[:,0]-=min(FuncInfoPhase[:,0])\n 
FuncInfoPhase[:,0]/=0.5*max(sp.absolute(FuncInfoPhase[:,0]))\n FuncInfoPhase[:,0]-=1.0\n\n FuncInfoPhase[:,1]-=min(FuncInfoPhase[:,1])\n FuncInfoPhase[:,1]/=0.5*max(sp.absolute(FuncInfoPhase[:,1]))\n FuncInfoPhase[:,1]-=1.0\n\n# plt.subplot2grid((2,3),(1,2),colspan=1,rowspan=1)\n# plt.plot(sp.linspace(0.0,2.0,cnt),FuncInfoPhase[:,0],linewidth=\"2\",color=\"red\")\n# plt.plot(sp.linspace(0.0,2.0,cnt),FuncInfoPhase[:,1],linewidth=\"2\",color=\"magenta\")\n# plt.title(\"f) $O_{\\mathbb{C}}$ scaled & translated, $(O_{\\mathbb{C}}($'$1$'$)-I_R)/I_0$\",fontsize=fs)\n# plt.xlabel(\"$\\phi_0/\\pi$\",fontsize=fs)\n# plt.ylim(-1,1)\n\n\n plt.show()\n\n ### plotting\n\n\n### main routine #########################################################################\n##########################################################################################\n\n\nif __name__ == '__main__':\n argh.dispatch_command(main_routine) \n\n\n\n" }, { "alpha_fraction": 0.5418124198913574, "alphanum_fraction": 0.5612082481384277, "avg_line_length": 28.923809051513672, "blob_id": "0e47be71d1241a852babc12a16079e0857733ddf", "content_id": "14a81c70b0cf8f7fcf8f9fc691dc83fb76d02865", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3145, "license_type": "no_license", "max_line_length": 79, "num_lines": 105, "path": "/python/blochsphere.py", "repo_name": "bhartl/optimal-control", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python2.7\nimport argh\nfrom qutip import Bloch\nfrom math import sqrt, sin, cos, pi\nfrom colorsys import hsv_to_rgb\nfrom numpy import linspace, outer, ones, sin, cos, arccos, arctan2, size, empty\nfrom scipy.interpolate import interp2d\nfrom numpy.random import rand\n\ninterpolated_density=0.0\n\nclass BlochDensity(Bloch):\n def plot_back(self):\n global interpolated_density\n # back half of sphere\n u = linspace(0, pi, 25)\n v = linspace(0, pi, 25)\n x = outer(cos(u), sin(v))\n y = outer(sin(u), sin(v))\n z = outer(ones(size(u)), cos(v))\n\n colours = empty(x.shape, dtype=object)\n for i in range(len(x)):\n for j in range(len(y)):\n theta = arctan2(y[i,j], x[i,j])\n phi = arccos(z[i,j])\n\n colours[i,j] = self.density(theta, phi)\n\n\n self.axes.plot_surface(x, y, z, rstride=1, cstride=1,\n facecolors=colours,\n alpha=self.sphere_alpha, \n linewidth=0, antialiased=True)\n # wireframe\n self.axes.plot_wireframe(x, y, z, rstride=5, cstride=5,\n color=self.frame_color,\n alpha=self.frame_alpha)\n # equator\n self.axes.plot(1.0 * cos(u), 1.0 * sin(u), zs=0, zdir='z',\n lw=self.frame_width, color=self.frame_color)\n self.axes.plot(1.0 * cos(u), 1.0 * sin(u), zs=0, zdir='x',\n lw=self.frame_width, color=self.frame_color)\n\n\n\n def plot_front(self):\n global interpolated_density\n # front half of sphere\n u = linspace(-pi, 0, 25)\n v = linspace(0, pi, 25)\n x = outer(cos(u), sin(v))\n y = outer(sin(u), sin(v))\n z = outer(ones(size(u)), cos(v))\n\n colours = empty(x.shape, dtype=object)\n for i in range(len(x)):\n for j in range(len(y)):\n theta = arctan2(y[i,j], x[i,j])\n phi = arccos(z[i,j])\n\n colours[i,j] = self.density(theta, phi)\n\n\n self.axes.plot_surface(x, y, z, rstride=1, cstride=1,\n facecolors=colours,\n alpha=self.sphere_alpha, \n linewidth=0, antialiased=True)\n\n\n # wireframe\n self.axes.plot_wireframe(x, y, z, rstride=5, cstride=5,\n color=self.frame_color,\n alpha=self.frame_alpha)\n # equator\n self.axes.plot(1.0 * cos(u), 1.0 * sin(u),\n zs=0, zdir='z', lw=self.frame_width,\n color=self.frame_color)\n self.axes.plot(1.0 * 
cos(u), 1.0 * sin(u),\n zs=0, zdir='x', lw=self.frame_width,\n color=self.frame_color)\n\ndef f(theta, phi):\n global interpolated_density\n return hsv_to_rgb(interpolated_density(theta,phi), 1, 1)\n\ndef main_routine ():\n global interpolated_density\n b = BlochDensity()\n b.sphere_alpha=0.5\n\n thetas, phis = linspace(-pi,pi,10), linspace(0,pi,10)\n density = rand(len(thetas), len(phis))\n\n ##scale density to a maximum of 1\n density /= density.max()\n\n interpolated_density = interp2d(thetas, phis, density)\n\n\n b.density = f\n b.show()\n\nif __name__ == '__main__':\n argh.dispatch_command(main_routine) \n\n" }, { "alpha_fraction": 0.5805827379226685, "alphanum_fraction": 0.6160077452659607, "avg_line_length": 42.50526428222656, "blob_id": "135ac5eaedae9c3e75c62184a9270d208f74e1b7", "content_id": "5799193778a56c8317cdbab939a6aa2c0b638e43", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8271, "license_type": "no_license", "max_line_length": 146, "num_lines": 190, "path": "/python/SmallestOverlapTimedelayedPhasevariation.py", "repo_name": "bhartl/optimal-control", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python2.7\n#-*- coding:utf-8 -*-\nimport argh\nimport os, re, sys\nimport ConfigParser as cp\nimport scipy as sp\nfrom scipy.integrate import cumtrapz\nfrom math import acos,asin\n\nfrom mpl_toolkits.mplot3d import axes3d\nfrom matplotlib import cm\nfrom matplotlib.ticker import LinearLocator, FormatStrFormatter\nimport matplotlib.pyplot as plt\n\nimport IOHelper\n\n\n##########################################################################################\n### main routine #########################################################################\n\ndef pltFunction (cavity1,cavity2,cavity3,plotType):\n if (plotType==0):\n return sp.absolute(cavity1[:])**2,sp.absolute(cavity2[:])**2,sp.absolute(cavity3[:])**2\n elif (plotType==1):\n return sp.real(cavity1[:]),sp.real(cavity2[:]),sp.real(cavity3[:])\n elif (plotType==2):\n return sp.imag(cavity1[:]),sp.imag(cavity2[:]),sp.imag(cavity3[:])\n else: \n return cavity1, cavity2, cavity3\n\n### check for arguments, g: generate data, r: read data, in both ways: generate matrices ###\ndef main_routine (wd=\"./\",cfg=\"./python/parameter.cfg\",delayCnt=11,delay=27.725,phaseCnt=11,sType=1,p3d=0,AMap=\"Blues\",RMap=\"Reds\",IMap=\"Greens\"):\n\n ### read config file ###\n print (\"load from config file: \" + cfg)\n\n configParser = cp.ConfigParser()\n configParser.read(cfg)\n print (configParser.sections())\n cfg=configParser.__dict__['_sections'].copy() \n\n #for src, target in cfg['NVSETUP'].items():\n # print(src + \" : \" + target)\n omega_c = float(cfg['NVSETUP']['{omega_c}'])\n\n nWrite=int(cfg['OCFourier']['{write_harmonic}'])\n nRead =int(cfg['OCFourier']['{read_harmonic}'])\n nStore=int(cfg['MEFourier']['{storage_harmonic}'])\n\n nDown =nRead+nWrite\n nUp =nDown+nWrite\n ### read config file ###\n\n ### read data ### \n cavityWrite,cavityMemo,cavityRead =IOHelper.harmonics_readwrite(**cfg)\n time =IOHelper.functionaltimes_readwrite(**cfg)\n\n time['write'][:] *= 1e9\n time['read'][:] *= 1e9\n ti = int(time['idx_ti'])\n tf = int(time['idx_tf'])\n functime = time['read'][ti:tf] \n dt = float(time['delta_t'])\n\n filename =IOHelper.getVectorOverlap(**cfg)\n reGamma,imGamma=sp.loadtxt(filename).T \n alphaR =reGamma[0:nRead] -1j*imGamma[0:nRead]\n alphaD =reGamma[nRead:nDown]-1j*imGamma[nRead:nDown]\n alphaU =reGamma[nDown:nUp] -1j*imGamma[nDown:nUp]\n ### read data ###\n\n ### 
plotting\n Reg1Up = sp.dot(alphaU.conj(),cavityWrite)\n Reg1Down = sp.dot(alphaD.conj(),cavityWrite)\n\n Reg2Down = sp.dot(alphaD.conj(),cavityMemo)\n Reg2Up = sp.dot(alphaU.conj(),cavityMemo)\n Reg2Read = sp.dot(alphaR.conj(),cavityRead)\n\n Reg2DownRead = Reg2Down + Reg2Read\n Reg2UpRead = Reg2Up + Reg2Read\n\n# FuncInfo = sp.zeros([3,delayCnt,phaseCnt])\n# FuncInfoPhase = sp.zeros([3,delayCnt,phaseCnt])\n# FuncShiftRead = sp.zeros([3,delayCnt,phaseCnt,functime.size],complex)\n FuncShiftRead = sp.zeros([delayCnt,phaseCnt,functime.size],complex)\n FuncInfoOlap = sp.zeros([3,delayCnt,phaseCnt])\n FuncInfoArea = sp.zeros([3,delayCnt,phaseCnt])\n\n for i in sp.arange(0.0,delayCnt):\n myDelay = i/(delayCnt-1.0)*delay\n\n shift=sp.absolute(time['read'][:]-(functime[0]-myDelay)).argmin()-1\n \n for j in sp.arange(0.0,phaseCnt):\n phi = j*2.0*sp.pi/(phaseCnt-1.0)\n \n FuncShiftRead[i,j,:] = sp.exp(-1j*phi)*Reg2Down[shift:shift+functime.size] + Reg2Read[shift:shift+functime.size]\n FuncShiftRead[i,j,:] += Reg2UpRead [ti:tf] # ????? why ??????\n\n\n FuncInfoIntegrand = sp.absolute(FuncShiftRead[i,j,:])*sp.absolute(Reg2UpRead[ti:tf])\n FuncInfoOlap [0,i,j] = cumtrapz( FuncInfoIntegrand, x=None, dx=dt )[-1]\n\n FuncInfoIntegrand = FuncShiftRead[i,j,:].conj() * Reg2UpRead[ti:tf]\n FuncInfoOlap [1,i,j] = cumtrapz( FuncInfoIntegrand.real, x=None, dx=dt )[-1]\n FuncInfoOlap [2,i,j] = cumtrapz( FuncInfoIntegrand.imag, x=None, dx=dt )[-1]\n\n\n FuncInfoArea [0,i,j] = cumtrapz( sp.absolute(FuncShiftRead[i,j,:])**2, x=None, dx=dt )[-1]\n FuncInfoArea [1,i,j] = cumtrapz( sp.real (FuncShiftRead[i,j,:]) , x=None, dx=dt )[-1]\n FuncInfoArea [2,i,j] = cumtrapz( sp.imag (FuncShiftRead[i,j,:]) , x=None, dx=dt )[-1]\n\n\n\n xx, yy = sp.meshgrid(sp.linspace(0.0,2.0*sp.pi,phaseCnt),sp.linspace(0.0,delay,delayCnt))\n\n zzOlap2 = FuncInfoOlap[0,:,:]#/(sp.absolute(FuncInfoOlap[0,:,:]).max())\n zzOlapR = FuncInfoOlap[1,:,:]#/(sp.absolute(FuncInfoOlap[1,:,:]).max())\n zzOlapI = FuncInfoOlap[2,:,:]#/(sp.absolute(FuncInfoOlap[2,:,:]).max())\n\n zzArea2 = FuncInfoArea[0,:,:]#/(sp.absolute(FuncInfoArea[0,:,:]).max())\n zzAreaR = FuncInfoArea[1,:,:]#/(sp.absolute(FuncInfoArea[1,:,:]).max())\n zzAreaI = FuncInfoArea[2,:,:]#/(sp.absolute(FuncInfoArea[2,:,:]).max())\n\n zmin = 0.0\n zmax = +1.0\n fs = 20\n\n fig = plt.figure()\n\n fig1 = fig.add_subplot(231, projection='3d')\n fig1.plot_surface(xx, yy, zzOlapR, rstride=5, cstride=5, cmap=RMap, alpha=0.5,zorder=11.0,vmin=zzOlapR.min(), vmax=zzOlapR.max())\n# fig1.contourf(xx, yy, zzOlapR, zdir='z', offset=zmin, cmap=RMap, vmin=-1, vmax=1,zorder=1.0)\n fig1.set_zlim(zzOlapR.min(),zzOlap2.max())\n fig1.set_title(\"a) Re$(O_{\\mathbb{C}}(e^{-i\\Phi_0}A_0(t-\\Delta t)+A_1(t);A_1(t)))$\",fontsize=fs)\n fig1.set_ylabel(\"$\\Delta t$ in ns\",fontsize=fs)\n fig1.set_xlabel(\"$\\Phi_0/\\pi$\",fontsize=fs)\n\n fig2 = fig.add_subplot(232, projection='3d')\n fig2.plot_surface(xx, yy, zzOlapI, rstride=5, cstride=5, cmap=IMap, alpha=0.5,zorder=11.0,vmin=zzOlapI.min(), vmax=zzOlapI.max())\n# fig2.contourf(xx, yy, zzOlapI, zdir='z', offset=zmin, cmap=IMap, vmin=-1, vmax=1,zorder=1.0)\n fig2.set_zlim(zzOlapI.min(),zzOlapI.max())\n fig2.set_title(\"b) Im$(O_{\\mathbb{C}}(e^{-i\\Phi_0}A_0(t-\\Delta t)+A_1(t);A_1(t)))$\",fontsize=fs)\n fig2.set_ylabel(\"$\\Delta t$ in ns\",fontsize=fs)\n fig2.set_xlabel(\"$\\Phi_0/\\pi$\",fontsize=fs)\n\n fig0 = fig.add_subplot(233, projection='3d')\n fig0.plot_surface(xx, yy, zzOlap2, rstride=5, cstride=5, cmap=AMap, alpha=0.5,zorder=11.0,vmin=zzOlap2.min(), 
vmax=zzOlap2.max())\n#    fig0.contourf(xx, yy, zzOlap2, zdir='z', offset=zmin, cmap=AMap, vmin=0, vmax=1,zorder=1.0)\n    fig0.set_zlim(zzOlap2.min(),zzOlap2.max())\n    fig0.set_title(\"c) $O_{\mathbb{R}}(e^{-i\Phi_0}A_0(t-\Delta t)+A_1(t);A_1(t))$\",fontsize=fs)\n    fig0.set_ylabel(\"$\Delta t$ in ns\",fontsize=fs)\n    fig0.set_xlabel(\"$\Phi_0/\pi$\",fontsize=fs)\n\n    fig4 = fig.add_subplot(234, projection='3d')\n    fig4.plot_surface(xx, yy, zzAreaR, rstride=5, cstride=5, cmap=RMap, alpha=0.5,zorder=11.0,vmin=zzAreaR.min(), vmax=zzAreaR.max())\n#    fig4.contourf(xx, yy, zzAreaR, zdir='z', offset=zmin, cmap=RMap, vmin=-1, vmax=1,zorder=1.0)\n    fig4.set_zlim(zzAreaR.min(),zzAreaR.max())\n    fig4.set_title(\"d) Area under Re$(e^{-i\Phi_0}A_0(t-\Delta t)+A_1(t))$\",fontsize=fs)\n    fig4.set_ylabel(\"$\Delta t$ in ns\",fontsize=fs)\n    fig4.set_xlabel(\"$\Phi_0/\pi$\",fontsize=fs)\n\n    fig5 = fig.add_subplot(235, projection='3d')\n    fig5.plot_surface(xx, yy, zzAreaI, rstride=5, cstride=5, cmap=IMap, alpha=0.5,zorder=11.0,vmin=zzAreaI.min(), vmax=zzAreaI.max())\n   # fig5.contourf(xx, yy, zzAreaI, zdir='z', offset=zmin, cmap=IMap, vmin=-1, vmax=1,zorder=1.0)\n    fig5.set_zlim(zzAreaI.min(),zzAreaI.max())\n    fig5.set_title(\"e) Area under Im$(e^{-i\Phi_0}A_0(t-\Delta t)+A_1(t))$\",fontsize=fs)\n    fig5.set_ylabel(\"$\Delta t$ in ns\",fontsize=fs)\n    fig5.set_xlabel(\"$\Phi_0/\pi$\",fontsize=fs)\n\n    fig3 = fig.add_subplot(236, projection='3d')\n    fig3.plot_surface(xx, yy, zzArea2, rstride=5, cstride=5, cmap=AMap, alpha=0.5,zorder=11.0,vmin=zzArea2.min(), vmax=zzArea2.max())\n#    fig3.contourf(xx, yy, zzArea2, zdir='z', offset=zmin, cmap=AMap, vmin=0, vmax=1,zorder=1.0)\n    fig3.set_zlim(zzArea2.min(),zzArea2.max())\n    fig3.set_title(\"f) Area under $|e^{-i\Phi_0}A_0(t-\Delta t)+A_1(t)|^2$\",fontsize=fs)\n    fig3.set_ylabel(\"$\Delta t$ in ns\",fontsize=fs)\n    fig3.set_xlabel(\"$\Phi_0/\pi$\",fontsize=fs)\n\n    plt.show()\n\n    ### plotting\n\n\n### main routine #########################################################################\n##########################################################################################\n\n\nif __name__ == '__main__':\n    argh.dispatch_command(main_routine) \n\n\n\n" }, { "alpha_fraction": 0.5084450840950012, "alphanum_fraction": 0.5181321501731873, "avg_line_length": 38.0815544128418, "blob_id": "d432bf0548ad17bcb22c7b476045be6116931baa", "content_id": "7526de861d41e99848e5d36e49c2896ab9d08c42", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 20140, "license_type": "no_license", "max_line_length": 183, "num_lines": 515, "path": "/python/SmallestOverlapEvaluate.py", "repo_name": "bhartl/optimal-control", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python2.7\n#-*- coding:utf-8 -*-\nimport argh\nimport os, re, sys\nimport ConfigParser as cp\nimport scipy as sp\nimport scipy.integrate # explicit import: sp.integrate.cumtrapz is used below\nimport scipy.linalg    # explicit import: sp.linalg.norm is used below\n\nfrom scipy.optimize import minimize\nfrom subprocess import Popen, PIPE, call\nimport matplotlib.pyplot as plt\n\nimport IOHelper\nfrom IOHelper import replace_in_file\n\n##########################################################################################\n### globals ##############################################################################\ntime = 0.0\ncavityRead = 0.0\ncavityMemo = 0.0\nnUp = 0\nnDown = 0\nnRead = 0\nnWrite = 0\ndimH = 0\ncfg = 0.0\nh0 = 0.0\nhc_up = 0.0 \nhc_down = 0.0 \nhs_up = 0.0\nhs_down = 0.0 \nhsep = 0.0 \nweight_up = 0.0\nweight_down = 0.0\nsepZeroWeight = 0.0\nnormUp = 0.0\nnormDown = 0.0\nnormRead = 0.0\nhole = 0.0\niteration 
= 0\n### globals ##############################################################################\n##########################################################################################\n\n\n##########################################################################################\n### variational coefficients #############################################################\n##########################################################################################\ndef initGamma0():\n    global dimH,nRead,nDown,nUp\n    print (\" initialize variational vector: random\")\n    gamma0=sp.random.random_sample([dimH])+0j\n    gamma0[     :nRead]=gamma0[     :nRead]/sp.linalg.norm(gamma0[     :nRead])*normRead\n    gamma0[nRead:nDown]=gamma0[nRead:nDown]/sp.linalg.norm(gamma0[nRead:nDown])*normDown\n    gamma0[nDown:nUp  ]=gamma0[nDown:nUp  ]/sp.linalg.norm(gamma0[nDown:nUp  ])*normUp\n\n    return gamma0\n##########################################################################################\n\n##########################################################################################\ndef initx(gamma0):\n    global dimH\n    print (\" map complex valued initial vector gamma to real valued x\")\n\n    x  =sp.zeros([2*dimH])\n    x0 =sp.zeros([2*dimH]) # map complex valued vector to real valued vector\n    x0[::2]  =gamma0[:].real # odds: 1,3,5,... <-> real part\n    x0[1::2] =gamma0[:].imag # even: 2,4,6,... <-> imag part\n\n    print (\" shape x_R: \" + str(x0[       :2*nRead].shape) + \"; normRead**2 - norm(alphaR)**2:\" + str(constraintNormRead(x0)))\n    print (\" shape x_D: \" + str(x0[2*nRead:2*nDown].shape) + \"; normDown**2 - norm(alphaD)**2:\" + str(constraintNormDown(x0)))\n    print (\" shape x_U: \" + str(x0[2*nDown:2*nUp  ].shape) + \"; normUp**2 - norm(alphaU)**2:\" + str(constraintNormUp (x0)))\n\n    return x, x0\n##########################################################################################\n\n##########################################################################################\ndef getGamma(x):\n    global nRead,nDown,nUp, dimH\n#    gamma = sp.zeros([dimH],complex)\n    gamma = x[::2]+1j*x[1::2]\n    return gamma\n##########################################################################################\n\n\n##########################################################################################\n### functionals ##########################################################################\n##########################################################################################\ndef seperateZero(x):\n    \"\"\" minimization functional at separate time-intervals \"\"\"\n    global h0\n\n    gamma=getGamma(x)\n    return sp.dot( sp.dot(gamma.conj().T,h0), gamma).real\n\n##########################################################################################\ndef partialOverlap(x):\n    global h0, nRead, nDown, nUp, dimH\n    \n    gamma=getGamma(x)\n\n    gammaDown = sp.zeros([dimH],complex)\n    gammaDown[nRead:nDown]= gamma[nRead:nDown]\n    gammaDown[nDown:nUp  ]= gamma[nRead:nDown]\n\n    gammaUp = sp.zeros([dimH],complex)\n    gammaUp[nRead:nDown] = gamma[nDown:nUp]\n    gammaUp[nDown:nUp  ] = gamma[nDown:nUp]\n    \n    return sp.absolute(sp.dot( sp.dot(gammaDown.conj().T,h0), gammaUp))\n\n\ndef constraintPartialOverlap(x):\n    global cfg,time\n    funcTime =float(time['tf'])-float(time['ti'])\n    weight=float(cfg ['OCConstraints']['{partial_weight_limit}'])\n    return weight - partialOverlap(x)\n\n##########################################################################################\ndef seperateStore(x):\n    \"\"\" minimization functional at separate time-intervals \"\"\"\n    global 
hs_down,hs_up \n myFunc = hs_up + hs_down\n gamma=getGamma(x)\n return sp.dot( sp.dot(gamma.conj().T,myFunc), gamma).real\n\n##########################################################################################\ndef seperateZeroStore(x):\n \"\"\" minimization functional at seperate time-intervals \"\"\"\n global h0,hs_down,hs_up \n myFunc = hs_up + hs_down + h0\n gamma=getGamma(x)\n return sp.dot( sp.dot(gamma.conj().T,myFunc), gamma).real\n\n\n#def smallestComplxOverlap(x):\n# \"\"\" minimization functional for complex overlap \"\"\"\n# global hreal,himag\n\n# gamma=x[::2]+1j*x[1::2]\n# return sp.absolute(sp.dot(gamma.conj().T,sp.dot(hreal,gamma))) + sp.absolute(sp.dot(gamma.conj().T,sp.dot(himag,gamma)))\n\n\n##########################################################################################\ndef smallestAbsoluteOverlap(x):\n \"\"\" minimization functional of classical overlap (|A¹(t)| |A²(t)|) \"\"\"\n global cavityRead,cavityMemo,time,nRead,nDown,nUp\n\n ti =int(time['idx_ti' ])\n tf =int(time['idx_tf' ])\n dt =float(time['delta_t'])\n\n gamma=getGamma(x)\n\n totalRead=sp.dot(gamma[ :nRead].conj().T,cavityRead[:,ti:tf])\n totalDown=sp.dot(gamma[nRead:nDown].conj().T,cavityMemo[:,ti:tf])\n totalUp =sp.dot(gamma[nDown:nUp ].conj().T,cavityMemo[:,ti:tf])\n\n overlapIntegrand=sp.absolute(totalRead+totalUp) * sp.absolute(totalRead+totalDown)\n\n return sp.integrate.cumtrapz( overlapIntegrand, x=None, dx=dt )[-1] # return last element of trapezoid integration\n\n##########################################################################################\ndef smallestOverlapSeperateZero(x):\n \"\"\" minimization functional of classical overlap (|A¹(t)| |A²(t)|) \"\"\"\n return smallestAbsoluteOverlap(x) + seperateZero(x) \n\n##########################################################################################\ndef smallestOverlapSeperateZeroPartial(x):\n \"\"\" minimization functional of classical overlap (|A¹(t)| |A²(t)|) \"\"\"\n return smallestAbsoluteOverlap(x) + seperateZero(x) + partialOverlap(x)\n\n##########################################################################################\ndef smallestOverlapSeperatePeaks(x):\n \"\"\" minimization functional of classical overlap (|A¹(t)| |A²(t)|) \"\"\"\n return seperateZero(x) + seperatePeaks(x) + smallestAbsoluteOverlap(x) \n\n##########################################################################################\ndef monitor(x):\n global iteration\n print (\" Iteration: {:6d}\\r\".format(iteration)),\n iteration+=1\n\n##########################################################################################\ndef constraintNormRead (x):\n \"\"\" norm of reading vector must equal the reading amplitude \"\"\"\n global normRead\n gamma=getGamma(x)\n return normRead**2 - sp.linalg.norm(gamma[:nRead])**2\n\n\n##########################################################################################\ndef constraintNormDown (x):\n \"\"\" norm of writing down vector must equal the down amplitude \"\"\"\n global normDown\n gamma=getGamma(x)\n return normDown**2 - sp.linalg.norm(gamma[nRead:nDown])**2\n\n\n##########################################################################################\ndef constraintNormUp (x):\n \"\"\" norm of writing up vector must equal the up amplitude \"\"\"\n global normUp\n gamma=getGamma(x)\n return normUp**2 - sp.linalg.norm(gamma[nDown:nUp])**2\n\n\n##########################################################################################\ndef constraintWeightDown (x):\n \"\"\" 
time-evolution-weight of cavity mode down (area under |A(t)|²) fixed to certain value in first half of functional time \"\"\"\n global weight_down,hc_down\n gamma=getGamma(x)\n return weight_down - sp.dot(gamma.conj().T,sp.dot(hc_down,gamma)).real\n\n\n##########################################################################################\ndef constraintWeightUp (x):\n \"\"\" time-evolution-weight of cavity mode up (area under |A(t)|²) fixed to certain value in last half of functional time \"\"\"\n global weight_up,hc_up\n gamma=getGamma(x)\n return weight_up - sp.dot(gamma.conj().T,sp.dot(hc_up,gamma)).real\n\n##########################################################################################\ndef constraintStoreUp(x):\n \"\"\" minimization functional at seperate time-intervals \"\"\"\n global hs_up,cfg,time\n funcTime =float(time['ti'])-float(time['t0'])\n weight=float(cfg ['OCConstraints']['{temporal_weight_limit}'])\n\n gamma=getGamma(x)\n return weight*funcTime - sp.dot( sp.dot(gamma.conj().T,hs_up), gamma).real\n\n\n##########################################################################################\ndef constraintStoreDownIneq(x):\n global cfg,time\n funcTime =float(time['ti'])-float(time['t0'])\n weight=float(cfg ['OCConstraints']['{temporal_weight_limit}'])\n return weight*funcTime - constraintStoreDown(x)\n\ndef constraintStoreDown(x):\n \"\"\" minimization functional at seperate time-intervals \"\"\"\n global hs_down\n gamma=getGamma(x)\n return sp.dot( sp.dot(gamma.conj().T,hs_down), gamma).real\n\n\n##########################################################################################\ndef constraintZeroStartDown(x):\n \"\"\" cavity mode down set to zero at start of functional time \"\"\"\n global cavityRead,cavityMemo,time,nRead,nDown,cfg\n\n ti = int(time['idx_ti' ])-1 # -1 because of python array reference\n\n gamma=getGamma(x)\n\n totalRead=sp.dot(gamma[ :nRead].conj().T,cavityRead[:,ti])\n totalDown=sp.dot(gamma[nRead:nDown].conj().T,cavityMemo[:,ti])\n \n return float(cfg ['OCConstraints']['{limit_init_down}'])-sp.linalg.norm(totalRead+totalDown)\n\n\n##########################################################################################\ndef constraintZeroStartUp(x):\n \"\"\" cavity mode up set to zero at start of functional time \"\"\"\n global cavityRead,cavityMemo,time,nRead,nUp,cfg\n\n ti = int(time['idx_ti' ])-1\n\n gamma=getGamma(x)\n\n totalRead=sp.dot(gamma[ :nRead].conj().T,cavityRead[:,ti])\n totalUp =sp.dot(gamma[nDown:nUp] .conj().T,cavityMemo[:,ti])\n \n return float(cfg ['OCConstraints']['{limit_init_up}'])-sp.linalg.norm(totalRead+totalUp)\n\n\n##########################################################################################\ndef constraintSeperateZero(x):\n \"\"\" minimization functional at seperate time-intervals \"\"\"\n global sepZeroWeight\n return sepZeroWeight - seperateZero(x)\n\n##########################################################################################\ndef seperatePeaks(x):\n \"\"\" minimization of amplitude interval between response peaks \"\"\"\n global hsep\n gamma=getGamma(x)\n return sp.dot( sp.dot(gamma.conj().T,hsep), gamma).real\n\n##########################################################################################\ndef constraintRealMaxCoefficient (x):\n \"\"\" max value of coefficients should be real \"\"\"\n global nUp,nRead\n gamma =getGamma(x)\n max_id =sp.absolute(gamma[:]).argmax() # index of complex value with maximum absolute value in vector x\n \n return gamma[max_id].imag\n 
\n \n##########################################################################################\n### TODO ... check this\ndef constraintHoleValue(x):\n return constraintHole(x,'value')\n\ndef constraintHoleSlope(x):\n return constraintHole(x,'slope')\n\ndef constraintHoleCurv(x):\n return constraintHole(x,'curv')\n\ndef constraintHole(x,key):\n global hole,nRead,nUp,nDown,nWrite\n\n gamma =getGamma(x)\n\n alphaDown = gamma[nRead:nDown]\n alphaUp = gamma[nDown:nUp]\n \n funDown = sp.dot(gamma[nRead:nDown].conj().T,hole[key][0:nWrite,:])\n funUp = sp.dot(gamma[nDown:nUp].conj().T ,hole[key][0:nWrite,:])\n \n return sp.sqrt(sp.linalg.norm(funDown)**2+sp.linalg.norm(funUp)**2)\n### functionals ##########################################################################\n##########################################################################################\n\n\n### check for arguments, g: generate data, r: read data, in both ways: generate matrices ###\ndef main_routine (baseDir=\"./\",configPath=\"./python/parameter.cfg\",generationType=\"r\"):\n print \"#################################################################\"\n print \"#################################################################\"\n print \"### optimal control #############################################\"\n print \"### preparation of smallest overlap functional ##################\"\n print \"#################################################################\"\n print \"#################################################################\"\n\n ### globals for functional variation\n global time, cavityRead, cavityMemo, \\\n nRead, nDown, nUp, nWrite, dimH, \\\n cfg, \\\n h0, hc_up, hc_down, hs_up, hs_down, hsep, \\\n weight_up, weight_down, sepZeroWeight, \\\n normUp, normDown, normRead, \\\n hole, \\\n iteration\n\n tmpDir=baseDir+\"tmp/\"\n # parser.add_argument(\"--cfg\" , help=\"config path\")\n ### check for arguments, g: generate data, r: read data ###\n\n ### read config file ###\n print (\"load from config file: \" + configPath)\n\n configParser = cp.ConfigParser()\n configParser.read(configPath)\n print (configParser.sections())\n cfg=configParser.__dict__['_sections'].copy() \n\n #for src, target in cfg['NVSETUP'].items():\n # print(src + \" : \" + target)\n\n nRead =int(cfg['OCFourier']['{read_harmonic}'])\n nWrite=int(cfg['OCFourier']['{write_harmonic}'])\n nDown =nRead+nWrite\n nUp =nDown+nWrite\n nTimeRead =int(cfg['OCTime']['{read_timecnt}'])\n dimH =nRead + 2*nWrite\n\n normRead =float(cfg['OCConstraints']['{amplitude_read}']) \n normDown =float(cfg['OCConstraints']['{amplitude_down}']) \n normUp =float(cfg['OCConstraints']['{amplitude_up}']) \n\n name_readwrite = IOHelper.getNameReadWrite(**cfg)\n name_vector = IOHelper.getVectorOverlap(**cfg)\n ### read config file ###\n\n\n ### prepare data with fortran ###\n cmd = \"mkdir -p \" + tmpDir\n print (tmpDir)\n call(cmd.split())\n replace_in_file('./python/py.parNvCenter.F95' , tmpDir +'parNvCenter.F95', **cfg['NVSETUP'])\n replace_in_file('./python/py.parSmallestOverlap.F95', tmpDir +'parSmallestOverlap.F95', **cfg['OCFourier'])\n replace_in_file(tmpDir +'parSmallestOverlap.F95', tmpDir +'parSmallestOverlap.F95', **cfg['OCConstraints'])\n replace_in_file(tmpDir +'parSmallestOverlap.F95', tmpDir +'parSmallestOverlap.F95', **cfg['OCTime'])\n replace_in_file(tmpDir +'parSmallestOverlap.F95', tmpDir +'parSmallestOverlap.F95', **cfg['FILES'])\n\n #write config file\n with open(cfg['FILES']['{prefix}']+\"parameter.cfg\", 'wb') as configfile:\n 
configParser.write(configfile)\n\n    cmd = \"mv \"+tmpDir+\"parSmallestOverlap.F95 \"+baseDir+\"srcOptCntrl/parSmallestOverlap.F95\"\n    call(cmd.split())\n    cmd = \"mv \"+tmpDir+\"parNvCenter.F95 \"+baseDir+\"srcNv/parNvCenter.F95\"\n    call(cmd.split())\n\n    print (\"compile fortran routines\")\n    cmd = \"./scripts/ifort-generateHarmonics.sh \" + baseDir\n    call(cmd.split())\n\n    print (\"invoke \" +baseDir +\"generateHarmonics\")\n\n    cmd = baseDir+\"generateHarmonics\"\n    generateHarmonics = Popen(cmd.split(), stdin=PIPE)\n    cmd = \"echo \" + generationType\n    generateInput = Popen(cmd.split(), stdout=generateHarmonics.stdin)\n    output = generateHarmonics.communicate()[0]\n    generateInput.wait()\n    ### prepare data with fortran ###\n\n\n    ### read data for functional variation ###\n    __,cavityMemo,cavityRead =IOHelper.harmonics_readwrite(**cfg)\n    time                     =IOHelper.functionaltimes_readwrite(**cfg)\n\n    h0,hs_down,hs_up,hc_down,hc_up,hsep=IOHelper.read_MtrxOverlap(**cfg['FILES']) \n    hole                               =IOHelper.read_HoleData(**cfg['FILES']) \n    ### read data for functional variation ###\n\n\n    ### functional variation ###\n    print (\"\\nstart minimization: \")\n\n    varMax  = 20\n    varStep = 1\n    success = False\n\n    #constraints for constraintWeightDown and constraintWeightUp\n    funcTime   =float(time['tfunc'])\n    weight_down=float(cfg ['OCConstraints']['{temporal_weight_down}'])*funcTime/2.0\n    weight_up  =float(cfg ['OCConstraints']['{temporal_weight_up}'])*funcTime/2.0\n\n    minConstraints= ( {'type' : 'ineq', 'fun' : constraintNormRead           },\n                     {'type' : 'eq', 'fun' : constraintNormDown           },\n                     {'type' : 'eq', 'fun' : constraintNormUp             },\n                     {'type' : 'eq', 'fun' : constraintWeightDown         },\n                     {'type' : 'eq', 'fun' : constraintWeightUp           },\n#                     {'type' : 'eq', 'fun' : constraintRealMaxCoefficient },\n                     {'type' : 'ineq', 'fun' : constraintZeroStartDown      },\n                     {'type' : 'ineq', 'fun' : constraintZeroStartUp        },\n                     {'type' : 'ineq', 'fun' : constraintPartialOverlap     },\n                     {'type' : 'ineq', 'fun' : constraintStoreUp            },\n                     {'type' : 'ineq', 'fun' : constraintStoreDownIneq      },\n#\n                    # {'type' : 'ineq', 'fun' : constraintSeperateZero       },\n#                     {'type' : 'eq', 'fun' : constraintStoreDown          },\n#                     {'type' : 'eq', 'fun' : constraintHoleValue          },\n#                     {'type' : 'eq', 'fun' : constraintHoleSlope          },\n#                     {'type' : 'eq', 'fun' : constraintHoleCurv           },\n                   )\n\n\n    while varStep < varMax and not success :\n        gamma0=initGamma0()\n        x,x0  =initx(gamma0)\n\n        # print (\" calculate sepzero to initialize vector x (method=SLSQP):\")\n        # iteration=1\n        # res=minimize(seperateZeroStore, #seperateZero, # smallestAbsoluteOverlap,\n        #             x0, #res.x, \n        #             method='SLSQP',\n        #             constraints=sepzeroConstraints,\n        #             tol=cfg['OCConstraints']['{tol_sepzero}'],\n        #             options={'maxiter' : 25000, 'disp' : True},\n        #             callback=monitor\n        #             )\n\n        # x0=res.x\n        # sepZeroWeight = res.fun\n\n        # funcTime =float(time['tfunc'])\n        # weight=float(cfg ['OCConstraints']['{temporal_weight_limit}'])\n        # sepZeroWeight=weight*funcTime/2e0\n\n        print (\" calculate smallest overlap of absolute values (method=SLSQP):\")\n        iteration=1\n        res=minimize(seperateZero, #smallestOverlapSeperateZero, # smallestOverlapSeperatePeaks, #smallestComplxOverlap, # smallestAbsoluteOverlap, # smallestOverlapSeperateZeroPartial, #\n                    x0, \n                    method='SLSQP',\n                    constraints=minConstraints,\n                    tol=cfg['OCConstraints']['{tol_classic}'],\n                    options={'maxiter' : 25000, 'disp' : True},\n                    callback=monitor\n                    )\n\n        success=res.success\n        gamma=getGamma(res.x)\n\n        print (\"\") \n        print (\" current norm(alphaR)     = \" +str(sp.linalg.norm(gamma[     :nRead])))\n        print (\" current norm(alphaD)     = \" +str(sp.linalg.norm(gamma[nRead:nDown])))\n        print (\" current norm(alphaU)     = \" +str(sp.linalg.norm(gamma[nDown:nUp  ])))\n        print (\" current partial Olap     = \" +str(partialOverlap(res.x)))\n#        print (\" current hole(value)      = \" +str(constraintHoleValue(res.x)))\n#        print (\" current hole(slope)      = \" +str(constraintHoleSlope(res.x)))\n#        print (\" current hole(curv)       = \" +str(constraintHoleCurv(res.x)))\n\n        \n        varStep+=1\n\n    if (success):\n        print (\"\\ndone with minimization, succeeded:\")\n    else:\n        print (\"\\ndone with minimization, no success:\")\n\n    sp.savetxt(name_vector,sp.array([gamma.real,gamma.conj().imag]).T) \n    # [real(gamma), imag(gamma)].conj()\n\n    cmd=baseDir+\"generateOptimized\"\n    call(cmd.split())\n\n### main routine #########################################################################\n##########################################################################################\n\n\nif __name__ == '__main__':\n    argh.dispatch_command(main_routine) \n\n" }, { "alpha_fraction": 0.5879828333854675, "alphanum_fraction": 0.6017167568206787, "avg_line_length": 24.622222900390625, "blob_id": "241c18bdb127d25e0e24f60d1613742ce4e8a3f3", "content_id": "54e3ae9475849dacdaa794d98760350d4bd12dcf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1165, "license_type": "no_license", "max_line_length": 64, "num_lines": 45, "path": "/python/coefficients-to-latex.py", "repo_name": "bhartl/optimal-control", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python2.7\n#-*- coding:utf-8 -*-\nimport scipy as sp\nimport argh\n\ndef main_routine (filelist,writeCnt=8,readCnt=16):\n    files = [line.rstrip('\\n') for line in open(filelist)]\n    \n    coeffs = sp.zeros([2*writeCnt+readCnt,len(files)],complex)\n    \n    nRead=readCnt\n    nDown=nRead+writeCnt\n    nUp  =nDown+writeCnt\n    \n    texDown=writeCnt\n    texUp  =texDown+writeCnt\n    texRead=texUp+readCnt\n    \n    myFmt = \"\"\n    \n    for i in sp.arange(len(files)) :\n        filename=files[i]\n        print \"read: \" + filename\n        \n        if (myFmt != \"\"):\n            myFmt += \" & \"\n        \n        myFmt = myFmt + \"$%.3f %+.3f~\\im$\"\n\n        reGamma,imGamma=sp.loadtxt(filename).T \n        alphaR =reGamma[0:nRead]     +1j*imGamma[0:nRead]\n        alphaD =reGamma[nRead:nDown]+1j*imGamma[nRead:nDown]\n        alphaU =reGamma[nDown:nUp]  +1j*imGamma[nDown:nUp]\n\n        coeffs[      0:texDown,i]=alphaD \n        coeffs[texDown:texUp  ,i]=alphaU\n        coeffs[texUp  :texRead,i]=alphaR\n\n    myFmt = \"$\\\\alpha^{}_{}$ & \" + myFmt + \"\\\\\\\\\"\n\n    sp.set_printoptions(precision=3)\n    sp.savetxt(filelist+\".tbl\",coeffs,delimiter=\"&\",fmt=myFmt) \n\nif __name__ == '__main__':\n    argh.dispatch_command(main_routine) \n    \n    \n" } ]
34
avanetten/spacenet_buildings_exploration
https://github.com/avanetten/spacenet_buildings_exploration
9dceaf8ef505d449fa9f94703c5552e3d90efe35
aa306de5c203787a0d807b6f393a06f8f1fb01b9
05cbffa0101b2b72e7c83dcf8c16b20b65262d46
refs/heads/master
2022-01-18T23:15:54.370837
2019-06-27T02:47:06
2019-06-27T02:47:06
115,126,668
18
12
null
null
null
null
null
[ { "alpha_fraction": 0.7595870494842529, "alphanum_fraction": 0.7949852347373962, "avg_line_length": 37.66666793823242, "blob_id": "ba0af31f0db0b60b232736578daea69857b083a1", "content_id": "a7267da1a23587c648cbb833ca63a5eb635dea8e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 678, "license_type": "no_license", "max_line_length": 88, "num_lines": 18, "path": "/README.md", "repo_name": "avanetten/spacenet_buildings_exploration", "src_encoding": "UTF-8", "text": "SpaceNet Buildings Exploration\n\nTransform SpaceNet geojson building labels data into raster masks.\nDownload data via:\n\n    aws s3api get-object --bucket spacenet-dataset \\\n    --key AOI_1_Rio/processedData/processedBuildingLabels.tar.gz \\\n    --request-payer requester processedBuildingLabels.tar.gz\n\nDownload spacenet utilities from:\n    https://github.com/SpaceNetChallenge/utilities/tree/master/python/spaceNet \n\nFor further details, see:\n    https://medium.com/the-downlinq/getting-started-with-spacenet-data-827fd2ec9f53\n\nExample outputs are included in the example_outputs directory\n\n![Alt text](/example_outputs/all_demo/013022223130_Public_img54.png?raw=true \"Figure 1\")\n" }, { "alpha_fraction": 0.5284143686294556, "alphanum_fraction": 0.5181321501731873, "avg_line_length": 38.0815544128418, "blob_id": "d432bf0548ad17bcb22c7b476045be6116931baa", "content_id": "7526de861d41e99848e5d36e49c2896ab9d08c42", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 20140, "license_type": "no_license", "max_line_length": 183, "num_lines": 515, "path": "/spacenet_building_explore.py", "repo_name": "avanetten/spacenet_buildings_exploration", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n\"\"\"\n@author: avanetten\n\"\"\"\n\n'''\nTransform SpaceNet geojson building labels data into raster masks.\nDownload data via:\naws s3api get-object --bucket spacenet-dataset \\\n--key AOI_1_Rio/processedData/processedBuildingLabels.tar.gz \\\n--request-payer requester processedBuildingLabels.tar.gz\n\nDownload spacenet utilities from:\nhttps://github.com/SpaceNetChallenge/utilities/tree/master/python/spaceNet\n\nFor further details, see:\nhttps://medium.com/the-downlinq/getting-started-with-spacenet-data-827fd2ec9f53\n'''\n\nfrom __future__ import print_function\nfrom matplotlib.collections import PatchCollection\nfrom osgeo import gdal, osr, ogr, gdalnumeric\nfrom matplotlib.patches import Polygon\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport shutil\nimport json\nimport glob\nimport sys\nimport os\n\n\n####################\n# EDIT THESE PATHS\n# download spacenet utilities from:\n# https://github.com/SpaceNetChallenge/utilities/tree/master/python/spaceNet \npath_to_spacenet_utils = '/path_to_spacenet_utils'\n# Set data dir\nspacenet_data_dir = '/path_to_spacenet_data'\n# This is the directory where this script is located\nspacenet_explore_dir = os.path.dirname(os.path.realpath(__file__))\n# explore N images in 3band data\nN_ims = 15\n####################\n\n# import packages\nsys.path.extend([path_to_spacenet_utils])\nfrom spaceNetUtilities import geoTools as gT\n\n \n############################################################################### \ndef geojson_to_pixel_arr(raster_file, geojson_file, pixel_ints=True,\n                         verbose=False):\n    '''\n    Transform geojson file into array of points in pixel (and latlon) coords\n    pixel_ints = 1 sets pixel coords as integers\n    '''\n    \n    # load geojson 
file\n with open(geojson_file) as f:\n geojson_data = json.load(f)\n\n # load raster file and get geo transforms\n src_raster = gdal.Open(raster_file)\n targetsr = osr.SpatialReference()\n targetsr.ImportFromWkt(src_raster.GetProjectionRef())\n \n geom_transform = src_raster.GetGeoTransform()\n if verbose:\n print (\"geom_transform:\", geom_transform)\n \n # get latlon coords\n latlons = []\n types = []\n for feature in geojson_data['features']:\n coords_tmp = feature['geometry']['coordinates'][0]\n type_tmp = feature['geometry']['type']\n if verbose: \n print (\"features:\", feature.keys())\n print (\"geometry:features:\", feature['geometry'].keys())\n\n #print \"feature['geometry']['coordinates'][0]\", z\n latlons.append(coords_tmp)\n types.append(type_tmp)\n #print feature['geometry']['type']\n \n # convert latlons to pixel coords\n pixel_coords = []\n latlon_coords = []\n for i, (poly_type, poly0) in enumerate(zip(types, latlons)):\n \n if poly_type.upper() == 'MULTIPOLYGON':\n #print \"oops, multipolygon\"\n for poly in poly0:\n poly=np.array(poly)\n if verbose:\n print (\"poly.shape:\", poly.shape)\n \n # account for nested arrays\n if len(poly.shape) == 3 and poly.shape[0] == 1:\n poly = poly[0]\n \n poly_list_pix = []\n poly_list_latlon = []\n if verbose: \n print (\"poly\", poly)\n for coord in poly:\n if verbose: \n print (\"coord:\", coord)\n lon, lat, z = coord \n px, py = gT.latlon2pixel(lat, lon, input_raster=src_raster, \n targetsr=targetsr, \n geom_transform=geom_transform)\n poly_list_pix.append([px, py])\n if verbose:\n print (\"px, py\", px, py)\n poly_list_latlon.append([lat, lon])\n \n if pixel_ints:\n ptmp = np.rint(poly_list_pix).astype(int)\n else:\n ptmp = poly_list_pix\n pixel_coords.append(ptmp)\n latlon_coords.append(poly_list_latlon) \n\n elif poly_type.upper() == 'POLYGON':\n poly=np.array(poly0)\n if verbose:\n print (\"poly.shape:\", poly.shape)\n \n # account for nested arrays\n if len(poly.shape) == 3 and poly.shape[0] == 1:\n poly = poly[0]\n \n poly_list_pix = []\n poly_list_latlon = []\n if verbose: \n print (\"poly\", poly)\n for coord in poly:\n if verbose: \n print (\"coord:\", coord)\n lon, lat, z = coord \n px, py = gT.latlon2pixel(lat, lon, input_raster=src_raster, \n targetsr=targetsr, \n geom_transform=geom_transform)\n poly_list_pix.append([px, py])\n if verbose:\n print (\"px, py\", px, py)\n poly_list_latlon.append([lat, lon])\n \n if pixel_ints:\n ptmp = np.rint(poly_list_pix).astype(int)\n else:\n ptmp = poly_list_pix\n pixel_coords.append(ptmp)\n latlon_coords.append(poly_list_latlon)\n \n elif poly_type.upper() == 'POINT':\n print (\"Skipping shape type: POINT in geojson_to_pixel_arr()\")\n continue\n \n else:\n print (\"Unknown shape type:\", poly_type, \" in geojson_to_pixel_arr()\")\n return\n \n return pixel_coords, latlon_coords\n\n###############################################################################\ndef create_building_mask(rasterSrc, vectorSrc, npDistFileName='', \n noDataValue=0, burn_values=1):\n\n '''\n Create building mask for rasterSrc,\n Similar to labeltools/createNPPixArray() in spacenet utilities\n '''\n \n ## open source vector file that truth data\n source_ds = ogr.Open(vectorSrc)\n source_layer = source_ds.GetLayer()\n\n ## extract data from src Raster File to be emulated\n ## open raster file that is to be emulated\n srcRas_ds = gdal.Open(rasterSrc)\n cols = srcRas_ds.RasterXSize\n rows = srcRas_ds.RasterYSize\n\n ## create First raster memory layer, units are pixels\n # Change output to geotiff instead of 
memory \n    memdrv = gdal.GetDriverByName('GTiff') \n    dst_ds = memdrv.Create(npDistFileName, cols, rows, 1, gdal.GDT_Byte, \n                           options=['COMPRESS=LZW'])\n    dst_ds.SetGeoTransform(srcRas_ds.GetGeoTransform())\n    dst_ds.SetProjection(srcRas_ds.GetProjection())\n    band = dst_ds.GetRasterBand(1)\n    band.SetNoDataValue(noDataValue) \n    gdal.RasterizeLayer(dst_ds, [1], source_layer, burn_values=[burn_values])\n    dst_ds = 0\n    \n    return \n\n###############################################################################\ndef create_dist_map(rasterSrc, vectorSrc, npDistFileName='', \n                           noDataValue=0, burn_values=1, \n                           dist_mult=1, vmax_dist=64):\n\n    '''\n    Create building signed distance transform from Yuan 2016 \n    (https://arxiv.org/pdf/1602.06564v1.pdf).\n    vmax_dist: absolute value of maximum distance (meters) from building edge\n    Adapted from createNPPixArray in labeltools\n    '''\n    \n    ## open source vector file that truth data\n    source_ds = ogr.Open(vectorSrc)\n    source_layer = source_ds.GetLayer()\n\n    ## extract data from src Raster File to be emulated\n    ## open raster file that is to be emulated\n    srcRas_ds = gdal.Open(rasterSrc)\n    cols = srcRas_ds.RasterXSize\n    rows = srcRas_ds.RasterYSize\n\n    geoTrans, poly, ulX, ulY, lrX, lrY = gT.getRasterExtent(srcRas_ds)\n    transform_WGS84_To_UTM, transform_UTM_To_WGS84, utm_cs \\\n        = gT.createUTMTransform(poly)\n    line = ogr.Geometry(ogr.wkbLineString)\n    line.AddPoint(geoTrans[0], geoTrans[3])\n    line.AddPoint(geoTrans[0]+geoTrans[1], geoTrans[3])\n\n    line.Transform(transform_WGS84_To_UTM)\n    metersIndex = line.Length()\n\n    memdrv = gdal.GetDriverByName('MEM')\n    dst_ds = memdrv.Create('', cols, rows, 1, gdal.GDT_Byte)\n    dst_ds.SetGeoTransform(srcRas_ds.GetGeoTransform())\n    dst_ds.SetProjection(srcRas_ds.GetProjection())\n    band = dst_ds.GetRasterBand(1)\n    band.SetNoDataValue(noDataValue)\n\n    gdal.RasterizeLayer(dst_ds, [1], source_layer, burn_values=[burn_values])\n    srcBand = dst_ds.GetRasterBand(1)\n\n    memdrv2 = gdal.GetDriverByName('MEM')\n    prox_ds = memdrv2.Create('', cols, rows, 1, gdal.GDT_Int16)\n    prox_ds.SetGeoTransform(srcRas_ds.GetGeoTransform())\n    prox_ds.SetProjection(srcRas_ds.GetProjection())\n    proxBand = prox_ds.GetRasterBand(1)\n    proxBand.SetNoDataValue(noDataValue)\n\n    opt_string = 'NODATA='+str(noDataValue)\n    options = [opt_string]\n\n    gdal.ComputeProximity(srcBand, proxBand, options)\n\n    memdrv3 = gdal.GetDriverByName('MEM')\n    proxIn_ds = memdrv3.Create('', cols, rows, 1, gdal.GDT_Int16)\n    proxIn_ds.SetGeoTransform(srcRas_ds.GetGeoTransform())\n    proxIn_ds.SetProjection(srcRas_ds.GetProjection())\n    proxInBand = proxIn_ds.GetRasterBand(1)\n    proxInBand.SetNoDataValue(noDataValue)\n    opt_string2 = 'VALUES='+str(noDataValue)\n    options = [opt_string, opt_string2]\n    #options = ['NODATA=0', 'VALUES=0']\n\n    gdal.ComputeProximity(srcBand, proxInBand, options)\n\n    proxIn = gdalnumeric.BandReadAsArray(proxInBand)\n    proxOut = gdalnumeric.BandReadAsArray(proxBand)\n\n    proxTotal = proxIn.astype(float) - proxOut.astype(float)\n    proxTotal = proxTotal*metersIndex\n    proxTotal *= dist_mult\n\n    # clip array\n    proxTotal = np.clip(proxTotal, -1*vmax_dist, 1*vmax_dist)\n\n    if npDistFileName != '':\n        # save as numpy file since some values will be negative\n        np.save(npDistFileName, proxTotal)\n        #cv2.imwrite(npDistFileName, proxTotal)\n\n    #return proxTotal\n    return\n\n###############################################################################\ndef plot_truth_coords(input_image, pixel_coords, \n                  figsize=(8,8), plot_name='',\n                  add_title=False, poly_face_color='orange', \n                  poly_edge_color='red', poly_nofill_color='blue', cmap='bwr'):\n    '''Plot ground truth coordinates, pixel_coords should be a numpy array'''\n    \n    fig, (ax0, ax1) = plt.subplots(1, 2, figsize=(2*figsize[0], figsize[1]))\n    \n    if add_title:\n        suptitle = fig.suptitle(plot_name.split('/')[-1], fontsize='large')\n    \n    # create patches\n    patches = []\n    patches_nofill = []\n    if len(pixel_coords) > 0:\n        # get patches        \n        for coord in pixel_coords:\n            patches_nofill.append(Polygon(coord, facecolor=poly_nofill_color, \n                                          edgecolor=poly_edge_color, lw=3))\n            patches.append(Polygon(coord, edgecolor=poly_edge_color, fill=True, \n                                   facecolor=poly_face_color))\n        p0 = PatchCollection(patches, alpha=0.25, match_original=True)\n        #p1 = PatchCollection(patches, alpha=0.75, match_original=True)\n        p2 = PatchCollection(patches_nofill, alpha=0.75, match_original=True)\n    \n    # ax0: raw image\n    ax0.imshow(input_image)\n    if len(patches) > 0:\n        ax0.add_collection(p0)\n    ax0.set_title('Input Image + Ground Truth Buildings') \n    \n    # truth polygons\n    zero_arr = np.zeros(input_image.shape[:2])\n    # set background to white?\n    #zero_arr[zero_arr == 0.0] = np.nan\n    ax1.imshow(zero_arr, cmap=cmap)\n    if len(patches) > 0:\n        ax1.add_collection(p2)\n    ax1.set_title('Ground Truth Building Polygons')\n    \n    #plt.axis('off')\n    plt.tight_layout()\n    if add_title:\n        suptitle.set_y(0.95)\n        fig.subplots_adjust(top=0.96)\n    #plt.show()\n    \n    if len(plot_name) > 0:\n        plt.savefig(plot_name)\n    \n    return patches, patches_nofill\n    \n    \n###############################################################################\ndef plot_building_mask(input_image, pixel_coords, mask_image, \n                  figsize=(8,8), plot_name='',\n                  add_title=False, poly_face_color='orange', \n                  poly_edge_color='red', poly_nofill_color='blue', cmap='bwr'):\n\n\n    fig, (ax0, ax1, ax2) = plt.subplots(1, 3, \n                                        figsize=(3*figsize[0], figsize[1]))\n    \n    if add_title:\n        suptitle = fig.suptitle(plot_name.split('/')[-1], fontsize='large')\n\n    # create patches\n    patches = []\n    patches_nofill = []\n    if len(pixel_coords) > 0:\n        # get patches        \n        for coord in pixel_coords:\n            patches_nofill.append(Polygon(coord, facecolor=poly_nofill_color, \n                                          edgecolor=poly_edge_color, lw=3))\n            patches.append(Polygon(coord, edgecolor=poly_edge_color, fill=True, \n                                   facecolor=poly_face_color))\n        p0 = PatchCollection(patches, alpha=0.25, match_original=True)\n        p1 = PatchCollection(patches_nofill, alpha=0.75, match_original=True)\n    \n    #if len(patches) > 0:\n    #    p0 = PatchCollection(patches, alpha=0.25, match_original=True)\n    #    #p1 = PatchCollection(patches, alpha=0.75, match_original=True)\n    #    p1 = PatchCollection(patches_nofill, alpha=0.75, match_original=True)   \n    \n    # ax0: raw image\n    ax0.imshow(input_image)\n    if len(patches) > 0:\n        ax0.add_collection(p0)\n    ax0.set_title('Input Image + Ground Truth Buildings') \n    \n    # truth polygons\n    zero_arr = np.zeros(input_image.shape[:2])\n    # set background to white?\n    #zero_arr[zero_arr == 0.0] = np.nan\n    ax1.imshow(zero_arr, cmap=cmap)\n    if len(patches) > 0:\n        ax1.add_collection(p1)\n    ax1.set_title('Ground Truth Building Polygons')\n    \n    # old method of truth, with mask\n    ## ax0: raw image\n    #ax0.imshow(input_image)\n    ## ground truth\n    ## set zeros to nan\n    #palette = plt.cm.gray\n    #palette.set_over('orange', 1.0)\n    #z = mask_image.astype(float)\n    #z[z==0] = np.nan\n    #ax0.imshow(z, cmap=palette, alpha=0.25, \n    #        norm=matplotlib.colors.Normalize(vmin=0.5, vmax=0.9, clip=False))\n    #ax0.set_title('Input Image + Ground Truth Buildings') \n    \n    # mask\n    ax2.imshow(mask_image, cmap=cmap)\n    # truth 
polygons?\n #if len(patches) > 0:\n # ax1.add_collection(p1)\n ax2.set_title('Ground Truth Building Mask') \n \n #plt.axis('off')\n plt.tight_layout()\n if add_title:\n suptitle.set_y(0.95)\n fig.subplots_adjust(top=0.96)\n #plt.show()\n \n if len(plot_name) > 0:\n plt.savefig(plot_name)\n \n return\n\n###############################################################################\ndef plot_dist_transform(input_image, pixel_coords, dist_image, \n figsize=(8,8), plot_name='', add_title=False, \n colorbar=True,\n poly_face_color='orange', poly_edge_color='red', \n poly_nofill_color='blue', cmap='bwr'):\n '''Explore distance transform'''\n\n fig, (ax0, ax1, ax2) = plt.subplots(1, 3, \n figsize=(3*figsize[0], figsize[1]))\n\n #fig, (ax0, ax1) = plt.subplots(1, 2, figsize=(2*figsize[0], figsize[1]))\n mind, maxd = np.round(np.min(dist_image),2), np.round(np.max(dist_image),2)\n \n if add_title:\n suptitle = fig.suptitle(plot_name.split('/')[-1], fontsize='large')\n\n # create patches\n patches = []\n patches_nofill = []\n if len(pixel_coords) > 0:\n # get patches \n for coord in pixel_coords:\n patches_nofill.append(Polygon(coord, facecolor=poly_nofill_color, \n edgecolor=poly_edge_color, lw=3))\n patches.append(Polygon(coord, edgecolor=poly_edge_color, fill=True, \n facecolor=poly_face_color))\n p0 = PatchCollection(patches, alpha=0.25, match_original=True)\n p1 = PatchCollection(patches, alpha=0.75, match_original=True)\n #p2 = PatchCollection(patches_nofill, alpha=0.75, match_original=True)\n \n #if len(patches) > 0:\n # p0 = PatchCollection(patches, alpha=0.25, match_original=True)\n # p1 = PatchCollection(patches, alpha=0.75, match_original=True)\n \n \n # ax0: raw image\n ax0.imshow(input_image)\n if len(patches) > 0:\n ax0.add_collection(p0)\n ax0.set_title('Input Image + Ground Truth Buildings') \n \n ## truth polygons\n #zero_arr = np.zeros(input_image.shape[:2])\n ## set background to white?\n ##zero_arr[zero_arr == 0.0] = np.nan\n #ax1.imshow(zero_arr, cmap=cmap)\n #if len(patches) > 0:\n # ax1.add_collection(p1)\n #ax1.set_title('Ground Truth Building Outlines')\n \n # transform\n cbar_pointer = ax1.imshow(dist_image)\n dist_suffix = \" (min=\" + str(mind) + \", max=\" + str(maxd) + \")\"\n ax1.set_title(\"Yuan 2016 Distance Transform\" + dist_suffix)\n \n # overlay buildings on distance transform\n ax2.imshow(dist_image)\n # truth polygons\n if len(patches) > 0:\n ax2.add_collection(p1)\n # truth mask\n #ax2.imshow(z, cmap=palette, alpha=0.5, \n # norm=matplotlib.colors.Normalize(vmin=0.5, vmax=0.9, clip=False))\n ax2.set_title(\"Ground Truth Polygons Overlaid on Distance Transform\")\n \n if colorbar:\n #from mpl_toolkits.axes_grid1 import make_axes_locatable\n #divider = make_axes_locatable(ax2)\n #cax = divider.append_axes('right', size='5%', pad=0.05)\n #fig.colorbar(cbar_pointer, cax=cax, orientation='vertical')\n left, bottom, width, height = [0.38, 0.85, 0.24, 0.03]\n cax = fig.add_axes([left, bottom, width, height])\n fig.colorbar(cbar_pointer, cax=cax, orientation='horizontal')\n\n #plt.axis('off')\n plt.tight_layout()\n if add_title:\n suptitle.set_y(0.95)\n fig.subplots_adjust(top=0.96)\n #plt.show()\n \n if len(plot_name) > 0:\n plt.savefig(plot_name)\n \n return\n \n###############################################################################\ndef plot_all_transforms(input_image, pixel_coords, mask_image, dist_image, \n figsize=(8,8), plot_name='', add_global_title=False, \n colorbar=False, add_titles=False,\n poly_face_color='orange', poly_edge_color='red', \n 
poly_nofill_color='blue', cmap='bwr'):\n '''Explore all transforms'''\n\n fig, (ax0, ax1, ax2, ax3) = plt.subplots(1, 4, \n figsize=(4*figsize[0], figsize[1]))\n\n #fig, (ax0, ax1) = plt.subplots(1, 2, figsize=(2*figsize[0], figsize[1]))\n \n if add_global_title:\n suptitle = fig.suptitle(plot_name.split('/')[-1], fontsize='large')\n\n # create patches\n patches = []\n patches_nofill = []\n if len(pixel_coords) > 0:\n # get patches \n for coord in pixel_coords:\n patches_nofill.append(Polygon(coord, facecolor=poly_nofill_color, \n edgecolor=poly_edge_color, lw=3))\n patches.append(Polygon(coord, edgecolor=poly_edge_color, fill=True, \n facecolor=poly_face_color))\n p0 = PatchCollection(patches, alpha=0.25, match_original=True)\n #p1 = PatchCollection(patches, alpha=0.75, match_original=True)\n p2 = PatchCollection(patches_nofill, alpha=0.75, match_original=True)\n \n #if len(patches) > 0:\n # p0 = PatchCollection(patches, alpha=0.25, match_original=True)\n # p1 = PatchCollection(patches, alpha=0.75, match_original=True)\n \n \n # ax0: raw image\n ax0.imshow(input_image)\n if len(patches) > 0:\n ax0.add_collection(p0)\n if add_titles:\n ax0.set_title('Input Image + Ground Truth Buildings') \n\n # truth polygons\n zero_arr = np.zeros(input_image.shape[:2])\n # set background to white?\n #zero_arr[zero_arr == 0.0] = np.nan\n ax1.imshow(zero_arr, cmap=cmap)\n if len(patches) > 0:\n ax1.add_collection(p2)\n if add_titles:\n ax1.set_title('Ground Truth Building Polygons') \n\n # mask\n ax2.imshow(mask_image, cmap=cmap)\n # truth polygons?\n #if len(patches) > 0:\n # ax1.add_collection(p1)\n if add_titles:\n ax2.set_title('Ground Truth Building Mask') \n\n # distance transform\n cbar_pointer = ax3.imshow(dist_image)\n # overlay buildings on distance transform? 
\n #if len(patches) > 0:\n # ax3.add_collection(p1)\n if add_titles:\n #mind, maxd = np.round(np.min(dist_image),2), \\\n # np.round(np.max(dist_image),2)\n #dist_suffix = \"\"#\" (min=\" + str(mind) + \", max=\" + str(maxd) + \")\"\n #ax3.set_title(\"Yuan 2016 Distance Transform\" + dist_suffix)\n ax3.set_title(\"Ground Truth Polygons Overlaid on Distance Transform\")\n \n if colorbar:\n #from mpl_toolkits.axes_grid1 import make_axes_locatable\n #divider = make_axes_locatable(ax2)\n #cax = divider.append_axes('right', size='5%', pad=0.05)\n #fig.colorbar(cbar_pointer, cax=cax, orientation='vertical')\n left, bottom, width, height = [0.38, 0.85, 0.24, 0.03]\n cax = fig.add_axes([left, bottom, width, height])\n fig.colorbar(cbar_pointer, cax=cax, orientation='horizontal')\n\n #plt.axis('off')\n plt.tight_layout()\n if add_global_title:\n suptitle.set_y(0.95)\n fig.subplots_adjust(top=0.96)\n #plt.show()\n \n if len(plot_name) > 0:\n plt.savefig(plot_name)\n \n return \n \n \n#%% \n###############################################################################\ndef main(): \n\n imDir = os.path.join(spacenet_data_dir, '3band')\n vecDir = os.path.join(spacenet_data_dir, 'vectorData/geoJson')\n imDir_out = os.path.join(spacenet_explore_dir, '3band')\n\n ground_truth_patches = []\n pos_val, pos_val_vis = 1, 255\n \n ########################\n # Create directories\n\n #coordsDir = spacenet_explore_dir + 'pixel_coords_mask/'\n coords_demo_dir = os.path.join(spacenet_explore_dir, 'pixel_coords_demo')\n\n maskDir = os.path.join(spacenet_explore_dir, 'building_mask')\n maskDir_vis = os.path.join(spacenet_explore_dir, 'building_mask_vis')\n mask_demo_dir = os.path.join(spacenet_explore_dir, 'mask_demo')\n\n distDir = os.path.join(spacenet_explore_dir, 'distance_trans')\n dist_demo_dir = os.path.join(spacenet_explore_dir, 'distance_trans_demo')\n \n all_demo_dir = os.path.join(spacenet_explore_dir, 'all_demo')\n\n # make dirs\n for p in [imDir_out, coords_demo_dir, maskDir, maskDir_vis, mask_demo_dir,\n distDir, dist_demo_dir, all_demo_dir]:\n if not os.path.exists(p):\n os.mkdir(p)\n\n # get input images and copy to working directory\n rasterList = glob.glob(os.path.join(imDir, '*.tif'))[10:10+N_ims] \n for im_tmp in rasterList:\n shutil.copy(im_tmp, imDir_out)\n \n # Create masks and demo images\n pixel_coords_list = []\n for i,rasterSrc in enumerate(rasterList):\n \n print (i, \"Evaluating\", rasterSrc)\n\n input_image = plt.imread(rasterSrc) # cv2.imread(rasterSrc, 1)\n \n # get name root\n name_root0 = rasterSrc.split('/')[-1].split('.')[0]\n # remove 3band or 8band prefix\n name_root = name_root0[6:]\n vectorSrc = os.path.join(vecDir, name_root + '_Geo.geojson')\n maskSrc = os.path.join(maskDir, name_root0 + '.tif')\n \n ####################################################\n # pixel coords and ground truth patches\n pixel_coords, latlon_coords = \\\n geojson_to_pixel_arr(rasterSrc, vectorSrc, \n pixel_ints=True,\n verbose=False)\n pixel_coords_list.append(pixel_coords)\n \n plot_name = os.path.join(coords_demo_dir, name_root + '.png')\n patch_collection, patch_coll_nofill = \\\n plot_truth_coords(input_image, pixel_coords, \n figsize=(8,8), plot_name=plot_name,\n add_title=False)\n ground_truth_patches.append(patch_collection)\n plt.close('all')\n ####################################################\n \n ####################################################\n #building mask\n outfile = os.path.join(maskDir, name_root0 + '.tif')\n outfile_vis = os.path.join(maskDir_vis, name_root0 + '.tif')\n \n # 
create mask from 0-1 and mask from 0-255 (for visual inspection)\n create_building_mask(rasterSrc, vectorSrc, \n npDistFileName=outfile, \n burn_values=pos_val)\n create_building_mask(rasterSrc, vectorSrc, \n npDistFileName=outfile_vis, \n burn_values=pos_val_vis)\n \n plot_name = os.path.join(mask_demo_dir, name_root + '.png')\n mask_image = plt.imread(outfile) # cv2.imread(outfile, 0)\n plot_building_mask(input_image, pixel_coords,\n mask_image,\n figsize=(8,8), plot_name=plot_name,\n add_title=False) \n plt.close('all')\n #################################################### \n \n ####################################################\n # signed distance transform\n # remove 3band or 8band prefix\n outfile = os.path.join(distDir, name_root0 + '.npy')#'.tif' \n create_dist_map(rasterSrc, vectorSrc, \n npDistFileName=outfile, \n noDataValue=0, burn_values=pos_val, \n dist_mult=1, vmax_dist=64)\n # plot\n #plot_name = os.path.join(dist_demo_dir + name_root, '_no_colorbar.png')\n plot_name = os.path.join(dist_demo_dir, name_root + '.png')\n mask_image = plt.imread(maskSrc) # cv2.imread(maskSrc, 0)\n dist_image = np.load(outfile)\n plot_dist_transform(input_image, pixel_coords, \n dist_image, figsize=(8,8),\n plot_name=plot_name, \n add_title=False,\n colorbar=True)#False)\n plt.close('all')\n ####################################################\n\n ####################################################\n # plot all transforms\n plot_name = os.path.join(all_demo_dir, name_root + '.png')#+ '_titles.png'\n mask_image = plt.imread(maskSrc) # cv2.imread(maskSrc, 0)\n dist_image = np.load(outfile)\n plot_all_transforms(input_image, pixel_coords, mask_image, dist_image, \n figsize=(8,8), plot_name=plot_name, add_global_title=False, \n colorbar=False, \n add_titles=False,#True,\n poly_face_color='orange', poly_edge_color='red', \n poly_nofill_color='blue', cmap='bwr') \n plt.close('all')\n ####################################################\n\n\n############################################################################### \nif __name__ == '__main__':\n main() \n" } ]
2
dsguo/TGen-2016-RNA-Fusion-Project
https://github.com/dsguo/TGen-2016-RNA-Fusion-Project
2889ebfc63ccea042acaf15fb8601243c80214c3
1d292e5d1407c65ea9d45506cbbc6a239c6f3587
ce034e8e27da7b9e051af0f381a349ff22834d85
refs/heads/master
2021-01-20T17:54:16.738144
2016-08-02T23:59:14
2016-08-02T23:59:14
64,410,981
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5903940796852112, "alphanum_fraction": 0.6025696992874146, "avg_line_length": 44.11666488647461, "blob_id": "ae569caf51e716b1c9f3c395755315fc4f5f7f37", "content_id": "740fafb0651cfe3a76a093204855d3a32f6ab71f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8120, "license_type": "no_license", "max_line_length": 136, "num_lines": 180, "path": "/Edit_TopHat-Fusion_Output/Process_Discordant_SAM_File_Guo.py", "repo_name": "dsguo/TGen-2016-RNA-Fusion-Project", "src_encoding": "UTF-8", "text": "# Usage\n# python Process_Discordant_SAM_File.py <Collapsed_TophatFusion_File> <Discordant_Reads.sam>\n\n# requires python 3.4 with pandas, numpy, pysam packages\n\n#Configure Environment\nimport pandas as pd\nimport numpy as np\nimport pysam\nimport sys\n\n\n# Function to process SAM file from SAMBLASTER DISCORDANT EXPORT\ndef bam_to_df(bam, chr = None, start=None, stop = None):\n    seq = []\n    name = []\n    r1_chr = []\n    r1_pos = []\n    r1_isRead1 = []\n    r1_isReversed = []\n    r1_cigar = []\n    r1_mapq = []\n    r2_isReversed = []\n    r2_chr = []\n    r2_pos = []\n    frag_length = []\n    r2_cigar = []\n    r2_mapq = []\n    for read in bam.fetch(chr, start, stop):\n        seq.append(read.query_sequence)\n        name.append(read.query_name)\n        r1_chr.append(read.reference_name)\n        r1_pos.append(read.reference_start)\n        r1_isRead1.append(read.is_read1)\n        r1_isReversed.append(read.is_reverse)\n        r1_cigar.append(read.cigarstring)\n        r1_mapq.append(read.mapping_quality)\n        r2_isReversed.append(read.mate_is_reverse)\n        r2_chr.append(read.next_reference_name)\n        r2_pos.append(read.next_reference_start)\n        frag_length.append(read.template_length)\n        if read.has_tag('MC') == True:\n            r2_cigar.append(read.get_tag('MC'))\n        else:\n            r2_cigar.append('NA')\n        if read.has_tag('MQ') == True:\n            r2_mapq.append(read.get_tag('MQ'))\n        else:\n            r2_mapq.append(255)\n    return pd.DataFrame({'seq': seq,\n                         'name': name,\n                         'r1_pos': r1_pos,\n                         'r1_chr': r1_chr,\n                         'r1_isRead1': r1_isRead1,\n                         'r1_isReversed': r1_isReversed,\n                         'r1_cigar': r1_cigar,\n                         'r1_mapq': r1_mapq,\n                         'r2_isReversed': r2_isReversed,\n                         'r2_chr': r2_chr,\n                         'r2_pos': r2_pos,\n                         'frag_length': frag_length,\n                         'r2_cigar': r2_cigar,\n                         'r2_mapq': r2_mapq})\n\n\n# Import Collapsed Tophat-Fusion table\ntf_table_file = tf_file = sys.argv[1]\ntf_table = pd.read_csv(tf_table_file, sep=\"\\t\")\n\n# Create python object for the SAM file\ndisc_sam_file = sys.argv[2]\nsamfile = pysam.AlignmentFile(disc_sam_file, \"r\")\n\n# Call Function to convert SAM file into pandas dataframe, and add additional columns so original chromosome columns have integer values\n# chromosomes are already in string format\nchrList = range(23)\nstrChrList = [\"{:01d}\".format(x) for x in chrList]\ntable = bam_to_df(samfile)\ntable[\"r1_chr_int\"] = np.nan\ntable[\"r2_chr_int\"]= np.nan\nfor row in table.index:\n    if table.at[row, \"r1_chr\"] in strChrList or table.at[row, \"r1_chr\"] in range(23):\n        table.set_value(row, \"r1_chr_int\", table.at[row, \"r1_chr\"])\n    elif table.at[row, \"r1_chr\"] == \"X\":\n        table.set_value(row, \"r1_chr_int\", 23)\n    elif table.at[row, \"r1_chr\"] == \"Y\":\n        table.set_value(row, \"r1_chr_int\", 24)\n    elif table.at[row, \"r1_chr\"] == \"M\": # or MT or mitochondrial chromosome calls\n        table.set_value(row, \"r1_chr_int\", 25)\n\nfor row in table.index:\n    if table.at[row, \"r2_chr\"] in strChrList or table.at[row, \"r2_chr\"] in range(23):\n        table.set_value(row, \"r2_chr_int\", table.at[row, \"r2_chr\"])\n    elif table.at[row, \"r2_chr\"] == \"X\":\n        table.set_value(row, \"r2_chr_int\", 23)\n    elif table.at[row, \"r2_chr\"] == \"Y\":\n        table.set_value(row, \"r2_chr_int\", 24)\n    elif table.at[row, \"r2_chr\"] == \"M\": # or MT or mitochondrial chromosome calls\n        table.set_value(row, \"r2_chr_int\", 25)\n\n# Extract just those rows that are the first read of the pair\nfirstReadTable = table[table.r1_isRead1 == True]\n\n# Add needed columns to the collapsed tophat file\n# Create new columns on the unique gene pairs table\ntf_table['discordantFrag_Count'] = np.nan\ntf_table['discordantFrag_For_Count'] = np.nan\ntf_table['discordantFrag_Rev_Count'] = np.nan\n\n# add columns to collapsed table so original chromosome columns have integer values\ntf_table[\"firstGene_Chr_Int\"] = np.nan\ntf_table[\"secondGene_Chr_Int\"]= np.nan\nfor row in tf_table.index:\n    if tf_table.at[row, \"firstGene_Chr\"] in range(23) or tf_table.at[row, \"firstGene_Chr\"] in strChrList:\n        tf_table.set_value(row, \"firstGene_Chr_Int\", tf_table.at[row, \"firstGene_Chr\"])\n    elif tf_table.at[row, \"firstGene_Chr\"] == \"X\":\n        tf_table.set_value(row, \"firstGene_Chr_Int\", 23)\n    elif tf_table.at[row, \"firstGene_Chr\"] == \"Y\":\n        tf_table.set_value(row, \"firstGene_Chr_Int\", 24)\n    elif tf_table.at[row, \"firstGene_Chr\"] == \"M\": # or MT or mitochondrial chromosome calls\n        tf_table.set_value(row, \"firstGene_Chr_Int\", 25)\n\nfor row in tf_table.index:\n    if tf_table.at[row, \"secondGene_Chr\"] in range(23) or tf_table.at[row, \"secondGene_Chr\"] in strChrList:\n        tf_table.set_value(row, \"secondGene_Chr_Int\", tf_table.at[row, \"secondGene_Chr\"])\n    elif tf_table.at[row, \"secondGene_Chr\"] == \"X\":\n        tf_table.set_value(row, \"secondGene_Chr_Int\", 23)\n    elif tf_table.at[row, \"secondGene_Chr\"] == \"Y\":\n        tf_table.set_value(row, \"secondGene_Chr_Int\", 24)\n    elif tf_table.at[row, \"secondGene_Chr\"] == \"M\": # or MT or mitochondrial chromosome calls\n        tf_table.set_value(row, \"secondGene_Chr_Int\", 25)\n\n\n### THIS NEEDS TO BE A LOOP OF THE IMPORTED COLLAPSED TOPHAT FUSION TABLE - updates the collapsed table :)\n# Create test variables - THESE ARE PRE_CALCULATED IN THE IMPORT\nfor row in tf_table.index:\n    window1_chr=tf_table.at[row, 'firstGene_Chr_Int']\n    window1_start=tf_table.at[row, 'firstGene_Window_Start']\n    window1_end=tf_table.at[row, 'firstGene_Window_End']\n    window2_chr=tf_table.at[row, 'secondGene_Chr_Int']\n    window2_start=tf_table.at[row, 'secondGene_Window_Start']\n    window2_end=tf_table.at[row, 'secondGene_Window_End']\n    # extract possible pairs\n    # because there can be two derivatives you need to test for both possible orientations\n    result_table = firstReadTable[((firstReadTable.r1_chr_int == window1_chr) &\n                                   (firstReadTable.r1_pos >= window1_start) &\n                                   (firstReadTable.r1_pos <= window1_end) &\n                                   (firstReadTable.r2_chr_int == window2_chr) &\n                                   (firstReadTable.r2_pos >= window2_start) &\n                                   (firstReadTable.r2_pos <= window2_end)) |\n                                  ((firstReadTable.r1_chr_int == window2_chr) &\n                                   (firstReadTable.r1_pos >= window2_start) &\n                                   (firstReadTable.r1_pos <= window2_end) &\n                                   (firstReadTable.r2_chr_int == window1_chr) &\n                                   (firstReadTable.r2_pos >= window1_start) &\n                                   (firstReadTable.r2_pos <= window1_end))]\n    # Get table length\n    discordantFrag_CountNum = len(result_table.index)\n    tf_table.set_value(row, 'discordantFrag_Count', discordantFrag_CountNum) # add value to collapsed table\n    # Make tables for both possible derivatives\n    # Read1 aligned to forward strand\n    r1_for_result_table = result_table[result_table.r1_isReversed == False]\n    r1_for_count = len(r1_for_result_table.index)\n    tf_table.set_value(row, 'discordantFrag_For_Count', r1_for_count) # add value to collapsed table\n    # Read1 aligned to reverse strand\n    r1_rev_result_table = result_table[result_table.r1_isReversed == True]\n    r1_rev_count = len(r1_rev_result_table.index)\n    tf_table.set_value(row, 'discordantFrag_Rev_Count', r1_rev_count) # add value to collapsed table\n\n\n# Create tables to figure out breakpoint locations\n# Create forward sorted tables\n# r1_for_result_table.sort_values(['r1_pos'], ascending=[1])\n\n# Create reverse sorted tables\n# r1_rev_result_table.sort_values(['r1_pos'], ascending=[1])\n\n# Write out final table\n# Save output to file\ntf_table.to_csv(\"Tophat_Fusion_Results_Collapsed_Final.txt\", sep=\"\\t\", index=False, float_format='%.0f')" }, { "alpha_fraction": 0.6617978811264038, "alphanum_fraction": 0.6681994199752808, "avg_line_length": 47.01298522949219, "blob_id": "593c585abb0be77e9f95090742719e7fc5af9e10", "content_id": "18f37d8772b290c996e07442380a28fc9e2d0f5b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 11091, "license_type": "no_license", "max_line_length": 163, "num_lines": 231, "path": "/Edit_TopHat-Fusion_Output/Process_TF_Output_Guo.py", "repo_name": "dsguo/TGen-2016-RNA-Fusion-Project", "src_encoding": "UTF-8", "text": "# Usage Example\n# python Process_TF_Output.py <TopHat_Fusion_Result.txt> <refGene.txt> <ensGene.txt>\n\n\n#Configure Environment\nimport pandas as pd\nimport numpy as np\nimport sys\n\n# Set a validation screening window\nwindow = 60000\n\n### set a distance-apart window buffer for overlapping position windows (above), actual overlap is accounted for below ###\ndist = 1000\n\n# Read in the original tophat-fusion result text file\n#tf_file = \"/Users/jkeats/MMRF_2000.txt\"\ntf_file = sys.argv[1]\n# Define column header\ncol_headers = ['Specimen',\n               'firstGene', 'firstGene_Chr', 'firstGene_Pos',\n               'secondGene', 'secondGene_Chr', 'secondGene_Pos',\n               'spanningReads', 'spanningMatePairs', 'spanningMPFusion', 'score']\ntf_table = pd.read_csv(tf_file, sep=\"\\t\", header=None, names=col_headers)\n#tf_table.head()\n\n# Read in the refGene.txt and ensGene.txt files used for annotations by TopHat-Fusion\ntable_headers = ['uniq_id', 'transcript_id', 'chromosome', 'strand', 'transcript_start', 'transcript_end',\n                 'cds_start', 'cds_end', 'exon_count', 'exon_starts', 'exon_ends', 'blank', 'gene_id',\n                 'cds_start_stat', 'cds_end_stat', 'exon_frames']\n\nrefGene_file = sys.argv[2]\nrefGene_table = pd.read_csv(refGene_file, sep=\"\\t\", header=None, names=table_headers)\n#refGene_table.head()\n\nensGene_file = sys.argv[3]\nensGene_table = pd.read_csv(ensGene_file, sep=\"\\t\", header=None, names=table_headers)\n#ensGene_table.head()\n\n# Read in the exon model file with ENSG and HUGO IDs\n#exon_models_file = \"/Users/jkeats/Ensembl_V74_ENSG_HUGO_ENSE_Table.txt\"\n#exon_models_file = sys.argv[2]\n#exon_models_table = pd.read_csv(exon_models_file, sep=\"\\t\")\n#exon_models_table.head()\n\n# Create a list of unique first and second gene pairs\ngene_pairs = tf_table.loc[:,['firstGene', 'secondGene']]\ngene_pairs.drop_duplicates(inplace=True)\n#gene_pairs.head()\n\n# Create new columns on the unique gene pairs table\ngene_pairs['ordered_genes'] = np.nan\ngene_pairs['ordered_first'] = np.nan\ngene_pairs['ordered_second'] = np.nan\ngene_pairs['fusion_pairs'] = np.nan\ngene_pairs['firstGene_Strand'] = np.nan\ngene_pairs['firstGene_Chr'] = 
np.nan\ngene_pairs['firstGene_Window_Start'] = np.nan\ngene_pairs['firstGene_Window_End'] = np.nan\ngene_pairs['firstGene_Pos_List'] = np.nan\ngene_pairs['secondGene_Strand'] = np.nan\ngene_pairs['secondGene_Chr'] = np.nan\ngene_pairs['secondGene_Window_Start'] = np.nan\ngene_pairs['secondGene_Window_End'] = np.nan\ngene_pairs['secondGene_Pos_List'] = np.nan\ngene_pairs['spanningReads_Sum'] = np.nan\ngene_pairs['spanningReads_List'] = np.nan\ngene_pairs['spanningMatePairs_Sum'] = np.nan\ngene_pairs['spanningMatePairs_List'] = np.nan\ngene_pairs['spanningMPFusion_Sum'] = np.nan\ngene_pairs['spanningMPFusion_List'] = np.nan\n\n# Loop through unique gene pair table to extract counts from tophat-fusion output table\nfor row in gene_pairs.index:\n # Create list with the two gene pairs on each row\n first = gene_pairs.at[row, 'firstGene']\n second = gene_pairs.at[row, 'secondGene']\n temp = [first, second]\n # Sort the temp list created for each row\n temp.sort()\n # Extract each of the sorted values and create a concatenation\n ordered_first = temp[0]\n ordered_second = temp[1]\n ordered_genes = ordered_first + \"_\" + ordered_second\n\n # Extract the strand for first gene\n if len(first) == 15 and first[:4] == \"ENSG\":\n # print(first + \" is a ENSG_ID\")\n first_Gene_table = ensGene_table[ensGene_table.gene_id == first]\n else:\n # print(first + \" is a HUGO_ID\")\n first_Gene_table = refGene_table[refGene_table.gene_id == first]\n loop = 0\n for i in first_Gene_table.index:\n if loop == 0:\n firstGene_Strand = first_Gene_table.at[i, 'strand']\n loop = 1\n elif loop == 1:\n firstGene_Strand2 = first_Gene_table.at[i, 'strand']\n\n # Extract the strand for second gene\n if len(second) == 15 and second[:4] == \"ENSG\":\n # print(first + \" is a ENSG_ID\")\n second_Gene_table = ensGene_table[ensGene_table.gene_id == second]\n else:\n # print(first + \" is a HUGO_ID\")\n second_Gene_table = refGene_table[refGene_table.gene_id == second]\n loop = 0\n for i in second_Gene_table.index:\n if loop == 0:\n secondGene_Strand = second_Gene_table.at[i, 'strand']\n loop = 1\n elif loop == 1:\n secondGene_Strand2 = second_Gene_table.at[i, 'strand']\n\n # Make a table from the input tophat table for each unique line\n pair_table = tf_table[(tf_table.firstGene == first) & (tf_table.secondGene == second)]\n fusion_pairs = len(pair_table.index)\n spanningReads_Sum = pair_table['spanningReads'].sum()\n spanningMatePairs_Sum = pair_table['spanningMatePairs'].sum()\n spanningMPFusion_Sum = pair_table['spanningMPFusion'].sum()\n\n loop = 0\n for line in pair_table.index:\n if loop == 0:\n firstGene_Chr = pair_table.at[line, 'firstGene_Chr']\n secondGene_Chr = pair_table.at[line, 'secondGene_Chr']\n firstGene_Pos_List = pair_table.at[line, 'firstGene_Pos']\n secondGene_Pos_List = pair_table.at[line, 'secondGene_Pos']\n spanningReads_List = pair_table.at[line, 'spanningReads']\n spanningMatePairs_List = pair_table.at[line, 'spanningMatePairs']\n spanningMPFusion_List = pair_table.at[line, 'spanningMPFusion']\n loop = 1\n elif loop == 1:\n firstGene_Chr2 = pair_table.at[line, 'firstGene_Chr']\n secondGene_Chr2 = pair_table.at[line, 'secondGene_Chr']\n # now test to ensure the chromosomes of each line are not changing\n if firstGene_Chr == firstGene_Chr2 and secondGene_Chr == secondGene_Chr2:\n print('Chromosomes Match')\n else:\n print('ERROR - ERROR')\n firstGene_Pos_List2 = pair_table.at[line, 'firstGene_Pos']\n firstGene_Pos_List = str(firstGene_Pos_List) + \";\" + str(firstGene_Pos_List2)\n secondGene_Pos_List2 = 
pair_table.at[line, 'secondGene_Pos']\n            secondGene_Pos_List = str(secondGene_Pos_List) + \";\" + str(secondGene_Pos_List2)\n            spanningReads_List2 = pair_table.at[line, 'spanningReads']\n            spanningReads_List = str(spanningReads_List) + \";\" + str(spanningReads_List2)\n            spanningMatePairs_List2 = pair_table.at[line, 'spanningMatePairs']\n            spanningMatePairs_List = str(spanningMatePairs_List) + \";\" + str(spanningMatePairs_List2)\n            spanningMPFusion_List2 = pair_table.at[line, 'spanningMPFusion']\n            spanningMPFusion_List = str(spanningMPFusion_List) + \";\" + str(spanningMPFusion_List2)\n\n    # Determine the proper windows to target for WGS extraction/validation\n    # first gene\n    firstGene_Pos_Mean = pair_table['firstGene_Pos'].mean()\n    firstGene_Window_Start = int(firstGene_Pos_Mean) - window\n    firstGene_Window_End = int(firstGene_Pos_Mean) + window\n    # second gene\n    secondGene_Pos_Mean = pair_table['secondGene_Pos'].mean()\n    secondGene_Window_Start = int(secondGene_Pos_Mean) - window\n    secondGene_Window_End = int(secondGene_Pos_Mean) + window\n\n    ### check if the two windows are overlapping: Not sure if other variables must be considered, such as the first gene being further along in the chromosome. ###\n    if firstGene_Chr == secondGene_Chr: # if the fusion is on the same chromosome; will these variable work or need to include firstGene_Chr2 and secondGene_Chr2?\n        if firstGene_Pos_Mean < secondGene_Pos_Mean:\n            if firstGene_Window_End < secondGene_Window_Start:\n                pass\n            elif firstGene_Window_End > secondGene_Window_Start:\n                halfDiff = int((firstGene_Window_End - secondGene_Window_Start)/2) # how much windows overlap divided by 2\n                midpoint = int(round(sum([firstGene_Pos_Mean, secondGene_Pos_Mean])/2))\n                firstGene_Window_End = midpoint - halfDiff - dist\n                secondGene_Window_Start = midpoint + halfDiff + dist\n        else:\n            if secondGene_Window_End < firstGene_Window_Start:\n                pass\n            elif secondGene_Window_End > firstGene_Window_Start:\n                halfDiff = int((secondGene_Window_End - firstGene_Window_Start)/2) # how much windows overlap divided by two\n                midpoint = int(round(sum([firstGene_Pos_Mean, secondGene_Pos_Mean]) / 2))\n                secondGene_Window_End = midpoint - halfDiff - dist\n                firstGene_Window_Start = midpoint + halfDiff + dist\n    else: pass\n\n\n\n    # Add the respective values to each respective row\n    gene_pairs.at[[row], 'ordered_genes'] = ordered_genes\n    gene_pairs.at[[row], 'ordered_first'] = ordered_first\n    gene_pairs.at[[row], 'ordered_second'] = ordered_second\n    gene_pairs.at[[row], 'fusion_pairs'] = fusion_pairs\n    gene_pairs.at[[row], 'firstGene_Strand'] = firstGene_Strand\n    gene_pairs.at[[row], 'firstGene_Chr'] = firstGene_Chr\n    gene_pairs.at[[row], 'firstGene_Window_Start'] = firstGene_Window_Start\n    gene_pairs.at[[row], 'firstGene_Window_End'] = firstGene_Window_End\n    gene_pairs.at[[row], 'firstGene_Pos_List'] = firstGene_Pos_List\n    gene_pairs.at[[row], 'secondGene_Strand'] = secondGene_Strand\n    gene_pairs.at[[row], 'secondGene_Chr'] = secondGene_Chr\n    gene_pairs.at[[row], 'secondGene_Window_Start'] = secondGene_Window_Start\n    gene_pairs.at[[row], 'secondGene_Window_End'] = secondGene_Window_End\n    gene_pairs.at[[row], 'secondGene_Pos_List'] = secondGene_Pos_List\n    gene_pairs.at[[row], 'spanningReads_Sum'] = spanningReads_Sum\n    gene_pairs.at[[row], 'spanningReads_List'] = spanningReads_List\n    gene_pairs.at[[row], 'spanningMatePairs_Sum'] = spanningMatePairs_Sum\n    gene_pairs.at[[row], 'spanningMatePairs_List'] = spanningMatePairs_List\n    
gene_pairs.at[[row], 'spanningMPFusion_Sum'] = spanningMPFusion_Sum\n    gene_pairs.at[[row], 'spanningMPFusion_List'] = spanningMPFusion_List\n\n#gene_pairs.head()\n\n# Save output to file\ngene_pairs.to_csv(\"Tophat_Fusion_Results_Collapsed.txt\", sep=\"\\t\", index=False, float_format='%.0f')\n\n# Make a BED file with first and second gene Chr, Window_Start, and Window_End\nfirstGene_BED = gene_pairs.loc[:,['firstGene_Chr', 'firstGene_Window_Start', 'firstGene_Window_End']]\nsecondGene_BED = gene_pairs.loc[:,['secondGene_Chr', 'secondGene_Window_Start', 'secondGene_Window_End']]\n\n# Concatenate and sort the two bed files\n# Give each file a common header\ncol_headers2 = ['Chr', 'Start', 'End']\nfirstGene_BED.columns = col_headers2\nsecondGene_BED.columns = col_headers2\n\n# Concatenate\nquery_BED = pd.concat([firstGene_BED, secondGene_BED], axis=0, join='outer', ignore_index=True)\n\n# Sort the Query_BED\nquery_BED.sort_values(['Chr', 'Start', 'End'], ascending=[1, 1, 1], inplace=True)\n\n# Save Output for manipulation with BEDTOOLS to collapse intervals\n# Save output to file\nquery_BED.to_csv(\"Temp_Query_BED.bed\", sep=\"\\t\", index=False, header=False, float_format='%.0f')\n" }, { "alpha_fraction": 0.6977842450141907, "alphanum_fraction": 0.7009081244468689, "avg_line_length": 59.3684196472168, "blob_id": "989fe9d3c60053b0a25b93eb503b464a99b0bb95", "content_id": "845c634c5b5fa916b883224830bf2663357db6ed", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 13765, "license_type": "no_license", "max_line_length": 346, "num_lines": 228, "path": "/Edit_TopHat-Fusion_Output/collapse_tophatfusion.py", "repo_name": "dsguo/TGen-2016-RNA-Fusion-Project", "src_encoding": "UTF-8", "text": "#collapses tophatfusion output file, <filename>.results.txt\n\n#import os\nimport pandas as pd\n#import numpy as np\n#import matplotlib.pyplot as plt\n\n#read in text file in pandas to form csv table, adding header names\nthpd = pd.read_csv(\"U138MG_ATCC.thFusion.result.txt\", sep=\"\\t\", index_col=0, names=[\"Sample_name\", \"Left_gene\", \"Left_chr\", \"Left_pos_center\", \"Right_gene\", \"Right_chr\", \"Right_pos_center\", \"Spanning_sum\", \"Spanning_mate_pairs_sum\", \"End_spanning_fusion_sum\", \"Fusion_score\" ])\n\n#read in text file in numpy\n#thnp = np.loadtxt(\"U138MG_ATCC.thFusion.result.txt\", dtype='str')\n\n#add header names to txt file\n#thnp_names = np.savetxt(\"Headers_named\", thnp, '''fmt= \", \"''', header=\"Sample_name, Left_gene, Left_chr, Left_coor, Right_gene, Right_chr, Right_coor, Spanning, Spanning_mate_pairs, End_spanning_fusion, Fusion_score\")\n\n#transpose panda table and save as another variable\nthpd_trans = thpd.T\n\n## set global variables, will be updated in needToCollapse function\\\n#nameL = []\n# workTable = []\n#tupleL = [] # contains tuples of each gene pair called in original table\n#duplicatesL = [] # contains list of gene pairs that are duplicated and how many times they appear in the original table\n#masterL = [] # contains list of gene pairs that are duplicated and which row indices they are in the original table\n#posAverageL = [] # contains list of duplicated gene pairs as tuples, their averaged values as tuples (left, right), and lists of all their left and right values (two lists)\n#readSumL = []\n# shortTable = []\n\n# Add unique number column and set index of table to numbers to avoid duplicate index problems\ndef addIndexL(table):\n    '''Takes as input a pandas table. 
Adds a column of increasing whole numbers to the front of the table.'''\n    global nameL # must be declared before nameL is assigned so the module-level variable is the one updated\n    nameL = [] # list of integers the length of the number of rows in the table\n    for i in range(len(table.index)): # makes the python-numbered list (starts with 0) nameL\n        nameL.append(i)\n    table.insert(0, \"Fusion_num\", nameL) # adds the nameL list as a column into the table, at the front\n    return\n\ndef setIndexL(table):\n    '''Takes as input a pandas table that has a column of whole numbers added. Sets this column of numbers as the index of the pandas table, replacing the default. Returns an updated working table.'''\n    workTable = table.set_index(\"Fusion_num\") # sets the index as the newly added column Fusion_num, replacing the default index of duplicate Sample_names\n    #global workTable\n    return workTable\n\n\n#see if there are duplicates\ndef needToCollapse(table):\n    '''Takes as input a working table (with reset numbered index). Determines whether the table needs to be collapsed by detecting duplicates. Returns Boolean True if duplicates are detected, False if there are not duplicates and the table does not need to be collapsed.'''\n    global tupleL, duplicatesL # updates global(script) variables; must be declared before they are assigned\n    tupleL = [] # list of fusions, consisting of tuples of the gene pairs\n    for row in table.index: # creates tupleL by looping through the rows and calling the gene names into tuples\n        left_gene=table.at[row, \"Left_gene\"] # column Left_gene\n        right_gene=table.at[row, \"Right_gene\"] # column Right_gene\n        pair = (left_gene, right_gene)\n        tupleL.append(pair)\n    #print(\"tupleL:\", tupleL)\n    duplicatesL = [] # list of gene-pair duplicates and how many times they are duplicated\n    for i in range(len(tupleL)): # for index of tuple in tupleL\n        mypair = tupleL[i]\n        mypaircount = 0\n        for pairs in tupleL:\n            if mypair == pairs:\n                mypaircount += 1\n        if mypaircount > 1:\n            duplicate = (mypair, mypaircount)\n            if duplicate not in duplicatesL: # removes repeats in duplicatesL\n                duplicatesL.append(duplicate)\n    if len(duplicatesL)>0:\n        #status = \"Yes, duplicates detected.\"\n        #print(status)\n        return True\n    else:\n        #status = \"No, no duplicates detected.\"\n        #print(status)\n        return False\n\n\n# identify rows with duplicates\ndef duplicateRowIndex(duplicatesL, tupleL):\n    '''Must run needToCollapse function first. Takes as input two of needToCollapse's variables made global, duplicatesL and tupleL. Returns a master list of tuples, the duplicated gene pair tuple and the indices in the table where they occur. '''\n    masterL = [] # list with duplicated gene pairs and their row indexes in the table\n    for i in range(len(duplicatesL)):\n        duppair = duplicatesL[i][0] # the gene pair tuple from duplicatesL\n        indexL = [] # will store the gene pair's row index values in the table\n        for j in range(len(tupleL)): # for row index in original table\n            if duppair == tupleL[j]:\n                indexL.append(j)\n        duple = (duppair, indexL)\n        masterL.append(duple)\n    #global masterL\n    return masterL\n\n# find average of fusion position (left and right)\ndef averagePosition(table, masterL):\n    '''Must run duplicateRowIndex first. Takes as input a pandas working table, and duplicateRowIndex output masterL. Returns a list, posAverageL, of lists containing gene pair name, average left and right positions in a tuple, as well as lists of all original given positions. 
'''\n    posAverageL = [] # list with duplicated gene pairs, their averaged positions, and list of all of their left and right positions\n    for i in range(len(masterL)): # for index in master list of duplicated gene pairs and their row indices\n        pair = masterL[i][0] # gene pair as tuple\n        indexL = masterL[i][1] # list of row indices\n        leftValueL = [] # list of left gene positions\n        rightValueL = [] # list of right gene positions\n        for j in indexL:\n            left_value = table.at[table.index[j], \"Left_pos_center\"] # position of left gene on a particular row\n            right_value = table.at[table.index[j], \"Right_pos_center\"] # position of right gene on a particular row\n            leftValueL.append(left_value)\n            rightValueL.append(right_value)\n        leftValueAvg = int(round(sum(leftValueL) / len(leftValueL))) # averages the left gene positions and rounds to nearest integer\n        rightValueAvg = int(round(sum(rightValueL) / len(rightValueL))) # averages the right gene positions and rounds to nearest integer\n        averageValues = (leftValueAvg, rightValueAvg) # tuple of average left and right gene positions\n        averageInfo = [pair, averageValues, leftValueL, rightValueL] # list for one gene pair of the gene pair names, average positions, and lists of all the left and right positions\n        posAverageL.append(averageInfo)\n    #global posAverageL\n    return posAverageL\n\n# find sums of supporting read counts\ndef sumReadCounts(table, masterL):\n    '''Must run duplicateRowIndex first. Takes as input a pandas working table, and duplicateRowIndex output masterL. Returns a list, readSumL, of lists containing gene pair name, sum of supporting read counts in a list, as well as lists of all original given read counts.'''\n    readSumL = [] # list with duplicated gene pairs, their sums of the three read counts as a list of length 3, and lists of all of their individual counts\n    for i in range(len(masterL)): # for index in master list of duplicated gene pairs and their row indices\n        pair = masterL[i][0] # gene pair as tuple\n        indexL = masterL[i][1] # list of row indices\n        spanningValueL = []\n        spanningMateValueL = []\n        endSpanningValueL = []\n        for j in indexL:\n            spanning_value = table.at[table.index[j], \"Spanning_sum\"]\n            spanningMate_value = table.at[table.index[j], \"Spanning_mate_pairs_sum\"]\n            endSpanning_value = table.at[table.index[j], \"End_spanning_fusion_sum\"]\n            spanningValueL.append(spanning_value)\n            spanningMateValueL.append(spanningMate_value)\n            endSpanningValueL.append(endSpanning_value)\n        spanningSum = sum(spanningValueL)\n        spanningMateSum = sum(spanningMateValueL)\n        endSpanningSum = sum(endSpanningValueL)\n        readSums = [spanningSum, spanningMateSum, endSpanningSum]\n        sumInfo = [pair, readSums, spanningValueL, spanningMateValueL, endSpanningValueL]\n        readSumL.append(sumInfo)\n    #global readSumL\n    return readSumL\n\n# make new table with duplicates removed\n# table.drop(table.index[list_of_rows_to_remove]\ndef removeDuplicateRows(table, masterL):\n    '''Must run duplicateRowIndex first. Takes as input a pandas working table and duplicateRowIndex output masterL. Removes all but one of the duplicate rows for each duplicated gene pair. 
Returns a new shortened table which does not contain any average or summed values.'''\n    removeRowL = []\n    for i in range(len(masterL)):\n        pair = masterL[i][0] # gene pair as tuple\n        indexL = masterL[i][1] # list of row indices\n        newIndexL = indexL[1:] # list of row indices to remove (keeping the first one)\n        removeRowL.extend(newIndexL)\n    shortTable = table.drop(table.index[removeRowL])\n    #global shortTable\n    return shortTable\n\n# make lists of revised column names for insertion, and replace duplicate values for position and read count with correct averages and sums\n\ndef columnLists(table, posAverageL, readSumL):\n    '''Must run removeDuplicateRows, averagePosition, and sumReadCounts first. Takes as input a pandas shortened table, and outputs from averagePosition and sumReadCounts. Updates position and read count information for previously duplicated rows. Returns lists of values necessary for the insertion of new columns into the final pandas table.'''\n    MleftPosArrayL = []\n    MrightPosArrayL = []\n    MspanningArrayL = []\n    MspanningMateArrayL = []\n    MendSpanningArrayL = []\n    dupGenePairL = []\n    for i in range(len(posAverageL)): # get list of gene pair tuples\n        dupGenePair = posAverageL[i][0]\n        dupGenePairL.append(dupGenePair)\n    for row in table.index:\n        leftPosArrayL = []\n        rightPosArrayL = []\n        spanningArrayL = []\n        spanningMateArrayL = []\n        endSpanningArrayL = []\n        if (table.at[row, \"Left_gene\"], table.at[row, \"Right_gene\"]) not in dupGenePairL: # if the gene pair is not duplicated\n            leftPosArrayL.append(table.at[row, \"Left_pos_center\"])\n            rightPosArrayL.append(table.at[row, \"Right_pos_center\"])\n            spanningArrayL.append(table.at[row, \"Spanning_sum\"])\n            spanningMateArrayL.append(table.at[row, \"Spanning_mate_pairs_sum\"])\n            endSpanningArrayL.append(table.at[row, \"End_spanning_fusion_sum\"])\n            MleftPosArrayL.append(leftPosArrayL) # append short list to master lists that will become column values\n            MrightPosArrayL.append(rightPosArrayL)\n            MspanningArrayL.append(spanningArrayL)\n            MspanningMateArrayL.append(spanningMateArrayL)\n            MendSpanningArrayL.append(endSpanningArrayL)\n        else:\n            j = dupGenePairL.index((table.at[row, \"Left_gene\"], table.at[row, \"Right_gene\"])) # finds the index of this gene pair in the dupGenePair list\n            left_pos_center = posAverageL[j][1][0]\n            right_pos_center = posAverageL[j][1][1]\n            spanning_sum = readSumL[j][1][0]\n            spanning_mate_sum = readSumL[j][1][1]\n            end_spanning_sum = readSumL[j][1][2]\n            table.set_value(row, \"Left_pos_center\", left_pos_center) # replace with average and summed values\n            table.set_value(row, \"Right_pos_center\", right_pos_center)\n            table.set_value(row, \"Spanning_sum\", spanning_sum)\n            table.set_value(row, \"Spanning_mate_pairs_sum\", spanning_mate_sum)\n            table.set_value(row, \"End_spanning_fusion_sum\", end_spanning_sum)\n            left_pos_array = posAverageL[j][2]\n            right_pos_array = posAverageL[j][3]\n            spanning_array = readSumL[j][2]\n            spanning_mate_array = readSumL[j][3]\n            end_spanning_array = readSumL[j][4]\n            leftPosArrayL.extend(left_pos_array) # assign individual arrays\n            rightPosArrayL.extend(right_pos_array)\n            spanningArrayL.extend(spanning_array)\n            spanningMateArrayL.extend(spanning_mate_array)\n            endSpanningArrayL.extend(end_spanning_array)\n            MleftPosArrayL.append(leftPosArrayL) # append array list to master lists that will become column values\n            MrightPosArrayL.append(rightPosArrayL)\n            MspanningArrayL.append(spanningArrayL)\n            MspanningMateArrayL.append(spanningMateArrayL)\n            MendSpanningArrayL.append(endSpanningArrayL)\n    #global 
MleftPosArrayL, MrightPosArrayL, MspanningArrayL, MspanningMateArrayL, MendSpanningArrayL\n    return MleftPosArrayL, MrightPosArrayL, MspanningArrayL, MspanningMateArrayL, MendSpanningArrayL\n\n# add necessary additional columns to pandas table\n# thpd.insert(newColumnIndex, column_Name, list_of_values)\n\ndef insertNewColumns(table, MleftPosArrayL, MrightPosArrayL, MspanningArrayL, MspanningMateArrayL, MendSpanningArrayL):\n    '''Must run columnLists and all other functions first. Takes as input a pandas shortened table and the column value lists from columnLists. Inserts and names new columns starting from the end of the table for ease of positioning. Returns a revised, updated table with appropriate columns and column values.'''\n    # add from reverse to not mess up order\n    table.insert(9, \"End_spanning_fusion_array\", MendSpanningArrayL)\n    table.insert(8, \"Spanning_mate_pairs_array\", MspanningMateArrayL)\n    table.insert(7, \"Spanning_array\", MspanningArrayL)\n    table.insert(6, \"Right_pos_array\", MrightPosArrayL)\n    table.insert(3, \"Left_pos_array\", MleftPosArrayL)\n    return table\n\n" }, { "alpha_fraction": 0.7291666865348816, "alphanum_fraction": 0.8125, "avg_line_length": 47, "blob_id": "52df44ae455bac40773b39e714eb471ec1798296", "content_id": "5f4851e68fa370161267a4327c07ea374d779697", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 96, "license_type": "no_license", "max_line_length": 64, "num_lines": 2, "path": "/README.md", "repo_name": "dsguo/TGen-2016-RNA-Fusion-Project", "src_encoding": "UTF-8", "text": "# TGen-2016-RNA-Fusion-Project\nCode written for the 2016 Helios Scholars project on RNA fusions\n" }, { "alpha_fraction": 0.7640718817710876, "alphanum_fraction": 0.7658682465553284, "avg_line_length": 97.23529052734375, "blob_id": "7104a56b8b10ef655c62a1747560c2e5ef109063", "content_id": "d1e4e66d86c2cad5824d3c1fdaf36af58f900fd5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3340, "license_type": "no_license", "max_line_length": 361, "num_lines": 34, "path": "/Edit_TopHat-Fusion_Output/Master_Collapse.py", "repo_name": "dsguo/TGen-2016-RNA-Fusion-Project", "src_encoding": "UTF-8", "text": "import collapse_tophatfusion as cthf\nimport pandas as pd\n\nexportFormat = \"\\t\" # changes separation values; comma separated: \",\"\n\n#inputFile\n#headerNameList = [\"Sample_name\", \"Left_gene\", \"Left_chr\", \"Left_pos_center\", \"Right_gene\", \"Right_chr\", \"Right_pos_center\", \"Spanning_sum\", \"Spanning_mate_pairs_sum\", \"End_spanning_fusion_sum\", \"Fusion_score\"]\n\ndef masterCollapse(inputFile, headerNameList):\n    '''Takes as input a TopHat-Fusion output .txt file and a list of header names (of length 11) and makes a table. Looks for duplicate gene fusions in the table rows. If found, duplicates will be collapsed into one row, their positions averaged and supporting reads summed. 
Returns the final modified table, which has extra columns for lists of collapsed values.'''\n    startTable = pd.read_csv(inputFile, sep=\"\\t\", index_col=0, names=headerNameList) # reads in .txt file and converts to a pandas table\n    cthf.addIndexL(startTable) # adds a numbered column to front of table\n    workTable = cthf.setIndexL(startTable) # sets the index of the working table to the numbered column, replacing default index\n    if cthf.needToCollapse(workTable) == False: # if there are no duplicate gene pairs in the table\n        posAverageL = [] # empty list needed for columnLists function\n        readSumL = [] # empty list needed for columnLists function\n        MleftPosArrayL, MrightPosArrayL, MspanningArrayL, MspanningMateArrayL, MendSpanningArrayL = cthf.columnLists(workTable, posAverageL, readSumL) # create value lists for new columns\n        revisedTable = cthf.insertNewColumns(workTable, MleftPosArrayL, MrightPosArrayL, MspanningArrayL, MspanningMateArrayL, MendSpanningArrayL) # insert new columns\n        print(\"This table does not contain duplicates. Exporting a header-inclusive .csv file.\")\n        return revisedTable\n    else:\n        masterL = cthf.duplicateRowIndex(cthf.duplicatesL, cthf.tupleL) # creates list of duplicate gene pairs and their row indices within the table\n        posAverageL = cthf.averagePosition(workTable, masterL) # creates list of information on average positions and corresponding lists of original positions called\n        readSumL = cthf.sumReadCounts(workTable, masterL) # creates list of information on summed read counts and corresponding lists of original read counts called\n        shortTable = cthf.removeDuplicateRows(workTable, masterL) # creates shortened table with duplicate rows removed\n        MleftPosArrayL, MrightPosArrayL, MspanningArrayL, MspanningMateArrayL, MendSpanningArrayL = cthf.columnLists(shortTable, posAverageL, readSumL) # creates value lists for new columns\n        revisedTable = cthf.insertNewColumns(shortTable, MleftPosArrayL, MrightPosArrayL, MspanningArrayL, MspanningMateArrayL, MendSpanningArrayL) # insert new columns\n        print(\"Duplicates detected. Exporting collapsed, header-inclusive .csv file.\")\n        return revisedTable\n\ndef exportAsCSV(inputFile, headerNameList):\n    '''Takes as input a TopHat-Fusion output .txt file and a list of header names (of length 11) and makes a table. Runs the function masterCollapse and exports final, edited / collapsed table as a csv with file name {QC}.thFusion.result.collapsed.csv.'''\n    finalTable = masterCollapse(inputFile, headerNameList)\n    finalTable.to_csv(inputFile[:-4] + \".collapsed.csv\", sep=exportFormat)\n" } ]
5
Iscaraca/BBCSLightningLab
https://github.com/Iscaraca/BBCSLightningLab
7504dea0a16eef0aaf218a5de07ca9349546217f
8df30ca9078ad6d2e07fed1fb32345af9e6c644f
04dc6f90391766506ec8c6b5fa99ad20b9727001
refs/heads/master
2022-07-26T10:50:29.274961
2020-05-13T10:14:29
2020-05-13T10:14:29
263,546,137
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.6404958963394165, "alphanum_fraction": 0.64462810754776, "avg_line_length": 19.16666603088379, "blob_id": "83cd2ba16602217e752c9d1739d1b36a1f1e7fba", "content_id": "fcc5d2515a4c94671bc580c1bf63d85831ad457f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 242, "license_type": "no_license", "max_line_length": 74, "num_lines": 12, "path": "/init-thread.py", "repo_name": "Iscaraca/BBCSLightningLab", "src_encoding": "UTF-8", "text": "import threading\nimport time\n\ndef sleeper(n):\n print(f\"Hi I'll sleep for {n} seconds\")\n time.sleep(n)\n print(\"I have awakened!\")\n\nt = threading.Thread(target = sleeper, name = \"test-thread\", args = ([5]))\nt.start()\n\nprint(\"Wake up\")\n" }, { "alpha_fraction": 0.7828390002250671, "alphanum_fraction": 0.7836864590644836, "avg_line_length": 294, "blob_id": "a37c9312c7558c8729df9777d0663c22c8d86c5d", "content_id": "a2226162c45d9480b00cea9dd21c0b1548666f77", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 4720, "license_type": "no_license", "max_line_length": 1244, "num_lines": 16, "path": "/README.md", "repo_name": "Iscaraca/BBCSLightningLab", "src_encoding": "UTF-8", "text": "# BBCSLightningLab\n### A workshop I conducted on multithreading in python and the GIL\n\nIn this lightning lab I'll be going through multithreading in Python. Multithreading gives you the ability to run multiple tasks concurrently. What I mean by concurrently is to have multiple tasks run independently, so while task A is running, I can start task B. I don't have to wait unttil task A is finished. One way of having tasks run concurrently is to have all your tasks run in parallel, meaning simultaneously. But becase they're running simultaneously, they're using multiple CPUs, one CPU for each task. However, this is a problem in Python due to something called the global interpreter lock, or the GIL for short, which prevents tasks from running in parallel.\n\nWhy this is so is due to how Python manages memory. Every time you create a variable, an array, it gets stored in your device's memory. But you can't hold that information in memory forever, because it's taking up precious space, so you have to let go of the memory after you don't need it anymore. How Python achieves this is through something called reference counting. All objects created in Python, like arrays and variables, have a reference count that keeps track of the number of references to the object. When this count reaches 0, Python assumes you don't have a use for it anymore and the memory occupied by the object is released. Let's take a brief look at how reference counting works. We create an empty list object, make a the value of the object, set b to a, and get the reference count of the list. We can see that it's 3, because its referenced by a, by b, and by the argument passed to this function. The problem with this way of managing memory is that when two tasks are running completely in parallel, it can increase or decrease an object's reference count simultaneously, and if this happens it can incorrectly release the memory while references to that object still exist. So we want to keep this reference count safe.\n\nPython does this by placing the GIL, a single lock on the interpreter which keeps the reference counts of objects safe. 
Since there's only one lock though, and the rule is that a task must get the lock to execute, it effectively makes all Python programs running on the CPU single threaded, meaning only one task can run at a time. Python could use multiple locks, but that would mean a decrease in performance.\n\nBut what if we wanted to have multiple threads executing at the same time? One way of going about this is to remove the GIL, but we won't go into that today. What we can do, which is a much safer option, is to go back and forth between threads very rapidly, to give the illusion that the threads are running concurrently. There is a module for this in Python, it's called threading, and I'm going to show you how to use this module briefly today. I'll be importing time as well, because I'll be making use of the sleep function to demonstrate concurrency.\n\nThe first thing you want to do whenever you use a thread is to have a function, because threads can only execute a function. So we have to define a function, and then initialise our thread. We can define our own function by typing the keyword def, followed by the function name, I'll call it sleeper, and the parameters the function takes in in parentheses, in this case I'll take the number of seconds it sleeps as n. We can print something like \"Hi, I'll be sleeping for n seconds\" at the start, sleep for that number of seconds and then print out \"I've woken up!\" or something like that. So this is the function that we're going to have the thread execute.\n\nWe can initialise a thread, we'll name it t, by calling the threading module, and call the thread class within that module, so threading.Thread. When creating a thread, there are a few parameters we need to be aware of, so the first is the target. The target is the function that we want to execute, and we want to execute this function sleeper, so you want to put target = sleeper. Now the next parameter is args, short for arguments for the function. Since this function takes in an integer n, we put n in brackets, in this case I'll put 5 so it sleeps for 5 seconds. That's it.\n\nTo execute a thread, all we need to do is type the name of the thread object, in this case t, and use the start method, so t.start(). Now what we can do is to type code underneath that, and the code runs at the same time, well sort of, as the thread. 
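\n\nPut together, those steps are exactly the contents of init-thread.py in this repo:\n\n```python\nimport threading\nimport time\n\ndef sleeper(n):\n    print(f\"Hi I'll sleep for {n} seconds\")\n    time.sleep(n)\n    print(\"I have awakened!\")\n\nt = threading.Thread(target = sleeper, name = \"test-thread\", args = ([5]))\nt.start()\n\nprint(\"Wake up\")\n```\n\n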
To demonstrate that, we can print a few \"Hi\"s after t.start(), and run this, and you can see that the printing of the \"Hi\"s doesn't wait until after the sleeper function has completed its execution, instead they run at the same time, sort of.\n" }, { "alpha_fraction": 0.5947712659835815, "alphanum_fraction": 0.5991285443305969, "avg_line_length": 18.95652198791504, "blob_id": "88254cd4c1c4499aa81e3c04d412fdfa90be488f", "content_id": "8b7197b79941f1d4e3d1171e69e696f10466acae", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 459, "license_type": "no_license", "max_line_length": 84, "num_lines": 23, "path": "/more-threads.py", "repo_name": "Iscaraca/BBCSLightningLab", "src_encoding": "UTF-8", "text": "import threading\nimport time\n\ndef sleeper(n):\n    print(f\"Hi I'll sleep for {n} seconds\")\n    time.sleep(n)\n    print(\"I have awakened!\")\n\nstart = time.time()\nthreads = []\n\nfor i in range(5):\n    t = threading.Thread(target = sleeper, name = 'thread{}'.format(i), args =([5]))\n    threads.append(t)\n    t.start()\n    print('{} has started \\n'.format(t.name))\n    \nfor i in threads:\n    i.join()\n    \nend = time.time()\n\nprint('time is {}'.format(end - start))\n" } ]
3
hndgy/serverless-full-stack-apps-azure-sql
https://github.com/hndgy/serverless-full-stack-apps-azure-sql
20480bd60e62dd5e18f01e9aef60273c09bb3cea
71b9d194f3622fc8ed02e8130243d1d807a32d3b
5d7ad03bf2c1e7bb2ea5455527a9ffe8992664a4
refs/heads/main
2023-09-05T21:29:25.416863
2021-11-23T15:15:36
2021-11-23T15:15:36
423,919,503
1
0
MIT
2021-11-02T16:30:01
2021-11-02T14:55:42
2021-09-14T22:04:31
null
[ { "alpha_fraction": 0.6301887035369873, "alphanum_fraction": 0.6320754885673523, "avg_line_length": 32.0625, "blob_id": "dd507138eef7244175d076dce9d3817f0093b521", "content_id": "96da623106c6d19b9cffd7ed3c3516d818657e21", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 530, "license_type": "permissive", "max_line_length": 66, "num_lines": 16, "path": "/azure-static-web-app/api/node/bus-data/index.js", "repo_name": "hndgy/serverless-full-stack-apps-azure-sql", "src_encoding": "UTF-8", "text": "const sql = require('mssql')\n\nconst AZURE_CONN_STRING = process.env[\"AzureSQLConnectionString\"];\n\nmodule.exports = async function (context, req) { \n const pool = await sql.connect(AZURE_CONN_STRING); \n\n const busData = await pool.request()\n .input(\"routeId\", sql.Int, parseInt(req.query.rid))\n .input(\"geofenceId\", sql.Int, parseInt(req.query.gid))\n .execute(\"web.GetMonitoredBusData\"); \n\n context.res = { \n body: JSON.parse(busData.recordset[0][\"locationData\"])\n };\n}\n\n" }, { "alpha_fraction": 0.6035346388816833, "alphanum_fraction": 0.6064801216125488, "avg_line_length": 31.961164474487305, "blob_id": "dc61b12fcc0e1c9c0479522dfe97cb8a8a99021b", "content_id": "5859feab59e4ae3b7f7cbb1c87764c78901e9bf0", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3395, "license_type": "permissive", "max_line_length": 163, "num_lines": 103, "path": "/azure-function/python/GetBusData/__init__.py", "repo_name": "hndgy/serverless-full-stack-apps-azure-sql", "src_encoding": "UTF-8", "text": "import datetime\nimport logging\nimport os\nimport requests\nimport json\nimport pyodbc\nfrom datetime import datetime as dt\nimport azure.functions as func\n\nAZURE_CONN_STRING = str(os.environ[\"AzureSQLConnectionString\"])\nGTFS_RT_FEED = str(os.environ[\"RealTimeFeedUrl\"])\nLOGIC_APP_URL = str(os.environ[\"LogicAppUrl\"])\n\ndef main(GetBusData: func.TimerRequest) -> func.HttpResponse:\n ## Get the routes we want to monitor\n routes = GetMonitoredRoutes()\n \n ## Get the real-time bus location feed\n feed = GetRealTimeFeed()\n \n ## Filter only the routes we want to monitor\n buses = [f for f in feed if int(f[\"RouteId\"]) in routes]\n\n logging.info('Received {0} buses positions, found {1} buses in monitored routes'.format(len(feed), len(buses)))\n\n ## Push data to Azure SQL and get the activated geofences\n activatedGeofences = ProcessGeoFences(buses)\n\n ## Send notifications \n for gf in activatedGeofences:\n logging.info('Vehicle %i, route %s, %sing GeoFence %s at %s UTC', gf[\"VehicleId\"], gf[\"RouteId\"], gf[\"GeoFenceStatus\"], gf[\"GeoFence\"], gf[\"TimestampUTC\"])\n TriggerLogicApp(gf)\n\ndef GetRealTimeFeed():\n response = requests.get(GTFS_RT_FEED)\n entities = json.loads(response.text)['entity']\n busData = []\n for entity in entities:\n v = entity['vehicle']\n busDetails = {\n \"DirectionId\": v['trip']['direction_id'],\n \"RouteId\": v['trip']['route_id'],\n \"VehicleId\": v['vehicle']['id'],\n \"Position\": {\n \"Latitude\": v['position']['latitude'],\n \"Longitude\": v['position']['longitude']\n },\n \"TimestampUTC\": dt.utcfromtimestamp(v['timestamp']).isoformat(sep=' ')\n }\n busData.append(busDetails) \n return busData\n\ndef GetMonitoredRoutes():\n result = executeQueryJSON('web.GetMonitoredRoutes') \n return [r[\"RouteId\"] for r in result]\n\ndef ProcessGeoFences(payload): \n result = {}\n if payload:\n result = executeQueryJSON('web.AddBusData', 
payload)\n logging.info('Found %i buses activating a geofence',len(result))\n return result\n\ndef TriggerLogicApp(geoFence):\n content = {\n \"value1\": str(geoFence[\"VehicleId\"]), \n \"value2\": str(geoFence[\"GeoFenceStatus\"])\n }\n\n logging.info(\"Calling Logic App webhook for {0}\".format(geoFence[\"VehicleId\"]))\n\n params = { \n \"Content-type\": \"application/json\" \n }\n\n response = requests.post(LOGIC_APP_URL, json=content, headers=params)\n if response.status_code != 202:\n logging.info('Error calling Logic App: {0}'.format(response.status_code)) \n else:\n logging.info(\"[%i/%i/%i] WebHook called successfully\", geoFence[\"VehicleId\"], geoFence[\"DirectionId\"], geoFence[\"GeoFenceId\"])\n\ndef executeQueryJSON(procedure, payload=None):\n result = {}\n try: \n conn = pyodbc.connect(AZURE_CONN_STRING)\n \n with conn.cursor() as cursor:\n if payload: \n cursor.execute(f\"EXEC {procedure} ?\", json.dumps(payload))\n else:\n cursor.execute(f\"EXEC {procedure}\")\n\n result = cursor.fetchone()[0]\n \n if result:\n result = json.loads(result) \n else:\n result = {} \n \n finally:\n pass\n\n return result\n" }, { "alpha_fraction": 0.6324324607849121, "alphanum_fraction": 0.6567567586898804, "avg_line_length": 22.125, "blob_id": "fab24f0204f5e52bd2fdedafd4a87da27b4838a6", "content_id": "3e262e79114cb2e84b7f8c78fdc13eaae190358b", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C#", "length_bytes": 372, "license_type": "permissive", "max_line_length": 105, "num_lines": 16, "path": "/azure-function/dotnet/Utils.cs", "repo_name": "hndgy/serverless-full-stack-apps-azure-sql", "src_encoding": "UTF-8", "text": "using System;\nusing System.Collections.Generic;\nusing System.Text;\n\nnamespace GetBusData\n{\n public static class Utils\n {\n private static readonly DateTime UnixEpoch = new DateTime(1970, 1, 1, 0, 0, 0, DateTimeKind.Utc);\n\n public static DateTime FromPosixTime(double value)\n {\n return UnixEpoch.AddSeconds(value);\n }\n }\n}\n" }, { "alpha_fraction": 0.7809917330741882, "alphanum_fraction": 0.7834710478782654, "avg_line_length": 40.72413635253906, "blob_id": "0474af0c17b21c99385533014736af9d6358b7a0", "content_id": "4eb93ebf568b5a6d875abd2a48522a831fdde97d", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1210, "license_type": "permissive", "max_line_length": 505, "num_lines": 29, "path": "/README.md", "repo_name": "hndgy/serverless-full-stack-apps-azure-sql", "src_encoding": "UTF-8", "text": "# Build serverless, full stack applications in Azure\n\nLearn how to create, build, and deploy modern full stack applications in Azure leveraging the language of your choice (Python, Node.js, or .NET) and with a Vue.js frontend. Topics covered include modern database capabilities, CI/CD and DevOps, backend API development, REST, and more. 
Using a real-world scenario of trying to catch the bus, you will learn how to build a solution that leverages Azure SQL Database, Azure Functions, Azure Static Web Apps, Logic Apps, Visual Studio Code and GitHub Actions.\n\n## Features\n\nThis project features the following capabilities.\n\n- azure-sql-database\n- github-actions\n- vs-code\n- azure-functions\n- azure-logic-apps\n- azure-app-service-static\n- azure-web-apps\n\n## Architecture\n\n![Solution Architecture](./documents/catch-the-bus-architecture.svg)\n\n## Getting Started\n\n### Microsoft Learn Path\n\nLearn at your own pace, following the interactive tutorial, which will guide you through the building of the entire solution: https://aka.ms/azuremodernapps \n\n### YouTube Video\n\nThe authors discuss the architecture, the key points and the details of each technology used and each choice made: https://youtu.be/XBxBC959tLg\n" }, { "alpha_fraction": 0.7589285969734192, "alphanum_fraction": 0.7767857313156128, "avg_line_length": 55.5, "blob_id": "c89e9717b95c7caa31ca00fd76fafd3a5d8a9eb3", "content_id": "9d3b3a5914aad587e7255556228a923a25bc61ce", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "SQL", "length_bytes": 112, "license_type": "permissive", "max_line_length": 58, "num_lines": 2, "path": "/database/scripts/00-create-database.sql", "repo_name": "hndgy/serverless-full-stack-apps-azure-sql", "src_encoding": "UTF-8", "text": "-- Create a small serverless general purpose database\ncreate database bus_db (service_objective = 'GP_S_Gen5_1')" }, { "alpha_fraction": 0.5473684072494507, "alphanum_fraction": 0.5505263209342957, "avg_line_length": 22.700000762939453, "blob_id": "ba78bf052dbbf0f4cfe39bb859818021afdc7f77", "content_id": "60321eff8300e8913c949e2faed7f3a7972778ea", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 950, "license_type": "permissive", "max_line_length": 78, "num_lines": 40, "path": "/azure-static-web-app/api/python/bus-data/__init__.py", "repo_name": "hndgy/serverless-full-stack-apps-azure-sql", "src_encoding": "UTF-8", "text": "import datetime\nimport logging\nimport os\nimport requests\nimport json\nimport pyodbc\nfrom datetime import datetime as dt\nimport azure.functions as func\n\nAZURE_CONN_STRING = str(os.environ[\"AzureSQLConnectionString\"])\n\ndef main(req: func.HttpRequest) -> func.HttpResponse:\n    result = {}\n    \n    try:\n        rid = int(req.params['rid'])\n        gid = int(req.params['gid'])\n    except ValueError:\n        rid = 0\n        gid = 0\n\n    try: \n        conn = pyodbc.connect(AZURE_CONN_STRING)\n        \n        with conn.cursor() as cursor:\n            cursor.execute(f\"EXEC [web].[GetMonitoredBusData] ?, ?\", rid, gid)\n\n            result = cursor.fetchone()[0]\n        \n            if result:\n                result = json.loads(result) \n            else:\n                result = {} \n\n        logging.info(result) \n        \n    finally:\n        conn.close()\n\n    return func.HttpResponse(json.dumps(result))\n\n\n" }, { "alpha_fraction": 0.5963941216468811, "alphanum_fraction": 0.5971081852912903, "avg_line_length": 40.19117736816406, "blob_id": "064c011d004cc468e1e905d47a7c638ffe4e7cf8", "content_id": "abdfeb2d4aa5be8ad6f317a0fd5f6e8328c4cc45", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C#", "length_bytes": 5604, "license_type": "permissive", "max_line_length": 178, "num_lines": 136, "path": "/azure-function/dotnet/BusDataManager.cs", "repo_name": "hndgy/serverless-full-stack-apps-azure-sql", "src_encoding": "UTF-8", "text": "using System;\nusing 
System.Collections.Generic;\nusing System.Text;\nusing Microsoft.Extensions.Logging;\nusing Dapper;\nusing System.Threading.Tasks;\nusing System.Net.Http;\nusing Microsoft.Data.SqlClient;\nusing System.Data;\nusing Newtonsoft;\nusing Newtonsoft.Json;\nusing System.Linq;\nusing Newtonsoft.Json.Linq;\n\nnamespace GetBusData\n{\n public class BusDataManager\n {\n public class ActivatedGeoFence\n {\n public int BusDataId { get; set; }\n public int VehicleId { get; set; }\n public int DirectionId { get; set; }\n public int RouteId { get; set; }\n public string RouteName { get; set; }\n public int GeoFenceId { get; set; }\n \t\tpublic string GeoFenceName { get; set; } \n\t\t public string GeoFenceStatus { get; set; }\n public DateTime TimestampUTC { get; set; }\n }\n\n private readonly string AZURE_CONN_STRING = Environment.GetEnvironmentVariable(\"AzureSQLConnectionString\");\n private readonly string GTFS_RT_FEED = Environment.GetEnvironmentVariable(\"RealTimeFeedUrl\");\n private readonly string LOGIC_APP_URL = Environment.GetEnvironmentVariable(\"LogicAppUrl\");\n\n private readonly ILogger _log;\n private readonly HttpClient _client = new HttpClient();\n\n public BusDataManager(ILogger log)\n {\n _log = log;\n }\n\n public async Task ProcessBusData()\n {\n // Get the real-time bus location feed\n var feed = await GetRealTimeFeed();\n \n // Get the routes we want to monitor\n var monitoredRoutes = await GetMonitoredRoutes();\n \n // Filter only the routes we want to monitor\n var buses = feed.Entities.FindAll(e => monitoredRoutes.Contains(e.Vehicle.Trip.RouteId));\n\n _log.LogInformation($\"Received {feed.Entities.Count()} buses positions, found {buses.Count()} buses in monitored routes\");\n\n // Push data to Azure SQL and get the activated geofences\n var activatedGeofences = await ProcessGeoFences(buses);\n\n // Send notifications\n foreach(var gf in activatedGeofences)\n {\n _log.LogInformation($\"Vehicle {gf.VehicleId}, route {gf.RouteName}, {gf.GeoFenceStatus} GeoFence {gf.GeoFenceName} at {gf.TimestampUTC} UTC\"); \n await TriggerLogicApp(gf);\n }\n\n }\n\n private async Task<GTFS.RealTime.Feed> GetRealTimeFeed()\n {\n var response = await _client.GetAsync(GTFS_RT_FEED);\n response.EnsureSuccessStatusCode();\n var responseString = await response.Content.ReadAsStringAsync();\n var feed = JsonConvert.DeserializeObject<GTFS.RealTime.Feed>(responseString);\n\n return feed;\n }\n\n private async Task<List<int>> GetMonitoredRoutes()\n {\n using var conn = new SqlConnection(AZURE_CONN_STRING);\n var queryResult = await conn.QuerySingleOrDefaultAsync<string>(\"web.GetMonitoredRoutes\", commandType: CommandType.StoredProcedure);\n var result = JArray.Parse(queryResult);\n return result.Select(e => (int)(e[\"RouteId\"])).ToList(); \n }\n\n private async Task<List<ActivatedGeoFence>> ProcessGeoFences(List<GTFS.RealTime.Entity> buses)\n {\n // Build payload\n var busData = new JArray();\n buses.ForEach(b =>\n {\n //_log.LogInformation($\"{b.Vehicle.VehicleId.Id}: {b.Vehicle.Position.Latitude}, {b.Vehicle.Position.Longitude}\");\n var d = new JObject\n {\n [\"DirectionId\"] = b.Vehicle.Trip.DirectionId,\n [\"RouteId\"] = b.Vehicle.Trip.RouteId,\n [\"VehicleId\"] = b.Vehicle.VehicleId.Id,\n [\"Position\"] = new JObject\n {\n [\"Latitude\"] = b.Vehicle.Position.Latitude,\n [\"Longitude\"] = b.Vehicle.Position.Longitude\n },\n [\"TimestampUTC\"] = Utils.FromPosixTime(b.Vehicle.Timestamp)\n };\n\n busData.Add(d);\n });\n \n if (buses.Count() == 0) return new List<ActivatedGeoFence>();\n\n using var 
conn = new SqlConnection(AZURE_CONN_STRING);\n {\n var queryResult = await conn.QuerySingleOrDefaultAsync<string>(\"web.AddBusData\", new { payload = busData.ToString() }, commandType: CommandType.StoredProcedure);\n var result = JsonConvert.DeserializeObject<List<ActivatedGeoFence>>(queryResult ?? \"[]\");\n _log.LogInformation($\"Found {result.Count()} buses activating a geofence\");\n return result;\n } \n }\n\n public async Task TriggerLogicApp(ActivatedGeoFence geoFence)\n {\n var content = JObject.Parse(\"{\" + $\"'value1':'{geoFence.VehicleId}', 'value2': '{geoFence.GeoFenceStatus}'\" + \"}\");\n\n _log.LogInformation($\"Calling Logic App webhook for {geoFence.VehicleId}\");\n\n var stringContent = new StringContent(JsonConvert.SerializeObject(content, Formatting.None), Encoding.UTF8, \"application/json\");\n var logicAppResult = await _client.PostAsync(LOGIC_APP_URL, stringContent);\n\n logicAppResult.EnsureSuccessStatusCode();\n\n _log.LogInformation($\"[{geoFence.VehicleId}/{geoFence.DirectionId}/{geoFence.GeoFenceId}] WebHook called successfully\");\n } \n\n }\n}\n" }, { "alpha_fraction": 0.682196319103241, "alphanum_fraction": 0.6855241060256958, "avg_line_length": 25.130434036254883, "blob_id": "6f5de57e27e9dac71557980a15351386bdd653b6", "content_id": "f065ef89d924b34983ec1a25c2daff95a7f810f9", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C#", "length_bytes": 601, "license_type": "permissive", "max_line_length": 100, "num_lines": 23, "path": "/azure-function/dotnet/GetBusData.cs", "repo_name": "hndgy/serverless-full-stack-apps-azure-sql", "src_encoding": "UTF-8", "text": "using System;\nusing System.Net.Http;\nusing Microsoft.Azure.WebJobs;\nusing Microsoft.Azure.WebJobs.Host;\nusing Microsoft.Extensions.Logging;\nusing System.Threading.Tasks;\nusing Microsoft.AspNetCore.Mvc;\nusing Microsoft.Azure.WebJobs.Extensions.Http;\nusing Microsoft.AspNetCore.Http;\n\nnamespace GetBusData\n{ \n public static class GetBusData\n {\n [FunctionName(\"GetBusData\")]\n public async static Task Run([TimerTrigger(\"*/15 * * * * *\")]TimerInfo myTimer, ILogger log)\n {\n var m = new BusDataManager(log);\n await m.ProcessBusData();\n }\n\n }\n}\n" }, { "alpha_fraction": 0.7291152477264404, "alphanum_fraction": 0.7338737845420837, "avg_line_length": 35.1464958190918, "blob_id": "8d2c69f3e2ed43ce30aa003fe768af20bddf396d", "content_id": "b08a231da748df0272dcb675fee53327c8acd064", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 5674, "license_type": "permissive", "max_line_length": 318, "num_lines": 157, "path": "/design-docs/module01.md", "repo_name": "hndgy/serverless-full-stack-apps-azure-sql", "src_encoding": "UTF-8", "text": "# Create the foundation for modern applications\n\nLearning path title: Build serverless, full stack applications in Azure\n\n*Add the working title [(Title guidance)](/help/learn/id-guidance-title)*\n\n## Role(s)\n\n- developer\n- database-administrator\n- solution-architect\n- devops-engineer\n- student\n- data-engineer\n\n## Level\n\n- intermediate\n\n## Product(s)\n\n- azure-sql-database\n- github\n- vs-code\n- azure-blob-storage\n- azure-devops\n\n## Prerequisites\n\n- Azure Fundamentals?\n\n## Summary\n\nBreak down a scenario for an application and build a multi-service solution based on the microservices approach. Learn how to leverage modern database capabilities to build a foundation for applications. \n\n## Learning objectives\n\n1. 
Create multi-service solutions from real-world scenarios\n1. Demonstrate modern Azure SQL Database capabilities \n\n## Chunk your content into subtasks\n\nIdentify the subtasks of *module title*\n\n| Subtask | What part of the introduction scenario does this subtask satisfy? | How will you assess it: **Exercise or Knowledge check**? | Which learning objective(s) does this help meet? | Does the subtask have enough learning content to justify an entire unit? If not, which other subtask will you combine it with? |\n| ---- | ---- | ---- | ---- | ---- |\n| TODO | TODO | TODO | TODO | TODO |\n| TODO | TODO | TODO | TODO | TODO |\n| TODO | TODO | TODO | TODO | TODO |\n\n## Outline the units\n\n*Add more units as needed for your content*\n\n1. **Introduction**\n\n    Provide a scenario of a real-world job-task that shows how the technology is used in practice:\n\n    *Catch the bus scenario*\n\n1. **Architect a solution**\n\n    List the content that will enable the learner to *subtask*:\n\n    - Break a problem into requirements\n      - Information needed to accomplish the enabling objective\n      - Information needed to accomplish the enabling objective\n    - Explain how various technologies help address each requirement\n      - Information needed to accomplish the enabling objective\n      - Information needed to accomplish the enabling objective\n    - Build an architecture\n      - Information needed to accomplish the enabling objective\n      - Information needed to accomplish the enabling objective\n\n    **Knowledge check**\n\n    What types of questions will test *learning objective*? *[(Knowledge check guidance)](/help/learn/id-guidance-knowledge-check)*\n\n    - Multiple choice (4 choice) question about what is Azure Function\n    - Multiple choice (4 choice) question about what is Azure static web app\n\n1. **Exercise - Configure your development environment**\n\n    List the steps which apply the learning content from previous unit:\n\n    1. Download Visual Studio Code, add any necessary extensions\n    1. Download Azure Data Studio\n    1. Fork and Clone the repository\n    1. (optional) Pick a language for the learning path\n\n1. **Modern database requirements**\n\n    List the content that will enable the learner to *subtask*:\n\n    - Dive deeper into some of the requirements of databases to support modern applications\n      - Information needed to accomplish the enabling objective\n      - Information needed to accomplish the enabling objective\n    - Give an overview of Azure SQL Database and how it addresses those requirements\n      - Information needed to accomplish the enabling objective\n      - Information needed to accomplish the enabling objective\n    - Revisit the scenario and discuss the elements that will be used from Azure SQL Database\n      - Information needed to accomplish the enabling objective\n      - Information needed to accomplish the enabling objective\n\n    **Knowledge check**\n\n    What types of questions will test *learning objective*? *[(Knowledge check guidance)](/help/learn/id-guidance-knowledge-check)*\n\n    - Multiple choice (4 choice) question about JSON support in Azure SQL DB\n    - Multiple choice (4 choice) question about Geospatial support in Azure SQL DB\n\n1. **Exercise - Deploy and configure Azure SQL Database**\n\n    List the steps which apply the learning content from previous unit:\n\n    1. Deploy SQL DB\n    1. Connect with ADS\n    1. Set up the database using T-SQL scripts (includes loading data)\n\n1. 
**CI/CD for Azure SQL Database**\n\n    List the content that will enable the learner to *subtask*:\n\n    - Understand how CI/CD applies to databases\n      - Using scripts versus using a dacpac\n    - Give an overview of Azure DevOps integrations with SQL DB\n      - Information needed to accomplish the enabling objective\n      - Information needed to accomplish the enabling objective\n    - Give an overview of GitHub Actions (and dbup) integrations with CI/CD \n      - Information needed to accomplish the enabling objective\n      - Information needed to accomplish the enabling objective\n\n    **Knowledge check**\n\n    What types of questions will test *learning objective*? *[(Knowledge check guidance)](/help/learn/id-guidance-knowledge-check)*\n\n    - Multiple choice (4 choice) question about why CI/CD is important for databases\n    - Multiple choice (4 choice) question about SQL DB and GitHub Actions\n\n1. **Exercise - Automate updates with GitHub Actions**\n\n    List the steps which apply the learning content from previous unit:\n\n    1. Configure GitHub secrets\n    1. Configure yaml file\n    1. Push commits\n    1. Monitor results\n\n1. **Summary**\n\n    How did you solve the problem in the initial scenario with the knowledge learned in the module? \n    \n    *Add your summary [(Summary guidance)](/help/learn/id-guidance-module-summary-unit)*\n\n## Notes\n\nNote any additional information that may be beneficial to this content such as links, reference material, etc." }, { "alpha_fraction": 0.6042848229408264, "alphanum_fraction": 0.6080655455589294, "avg_line_length": 33.5, "blob_id": "7174dc37a850a0f6806ae3438b9bc838e60ade38", "content_id": "db057b8dfe9f65f41e41bb24c738d34d2596e4ee", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C#", "length_bytes": 1587, "license_type": "permissive", "max_line_length": 130, "num_lines": 46, "path": "/azure-static-web-app/api/dotnet/ShowBusData.cs", "repo_name": "hndgy/serverless-full-stack-apps-azure-sql", "src_encoding": "UTF-8", "text": "using System;\nusing System.Net.Http;\nusing Microsoft.Azure.WebJobs;\nusing Microsoft.Azure.WebJobs.Host;\nusing Microsoft.Extensions.Logging;\nusing System.Threading.Tasks;\nusing Microsoft.AspNetCore.Mvc;\nusing Microsoft.Azure.WebJobs.Extensions.Http;\nusing Microsoft.AspNetCore.Http;\nusing Dapper;\nusing Microsoft.Data.SqlClient;\nusing System.Data;\nusing Newtonsoft;\nusing Newtonsoft.Json;\nusing System.Linq;\nusing Newtonsoft.Json.Linq;\n\nnamespace ShowBusData\n{ \n    public static class ShowBusDataMain\n    {\n        private static HttpClient httpClient = new HttpClient();\n        private static readonly string AZURE_CONN_STRING = Environment.GetEnvironmentVariable(\"AzureSQLConnectionString\");\n\n        [FunctionName(\"ShowBusData\")]\n        public static async Task<IActionResult> ShowBusData([HttpTrigger(\"get\", Route = \"bus-data\")] HttpRequest req, ILogger log)\n        { \n            int rid = 0, gid = 0;\n\n            Int32.TryParse(req.Query[\"rid\"], out rid);\n            Int32.TryParse(req.Query[\"gid\"], out gid);\n            \n            using(var conn = new SqlConnection(AZURE_CONN_STRING))\n            {\n                var result = await conn.QuerySingleOrDefaultAsync<string>(\n                    \"web.GetMonitoredBusData\", \n                    new {\n                        @RouteId = rid,\n                        @GeofenceId = gid\n                    }, commandType: CommandType.StoredProcedure); \n                \n                return new OkObjectResult(JObject.Parse(result));\n            } \n        }\n    }\n}\n" }, { "alpha_fraction": 0.7346363067626953, "alphanum_fraction": 0.7386661171913147, "avg_line_length": 36.5, "blob_id": "f423810930fb2d8ae220b2ca34722b592ffe1653", "content_id": "ee414df16e726c9da3bfdf7b4440eb38e39e311a", "detected_licenses": 
[ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 4963, "license_type": "permissive", "max_line_length": 318, "num_lines": 136, "path": "/design-docs/module03.md", "repo_name": "hndgy/serverless-full-stack-apps-azure-sql", "src_encoding": "UTF-8", "text": "# Build full stack applications with Azure Static Web Apps and Azure SQL Database\n\nLearning path title: Build serverless, full stack applications in Azure\n\n*Add the working title [(Title guidance)](/help/learn/id-guidance-title)*\n\n## Role(s)\n\n- developer\n- database-administrator\n- solution-architect\n- devops-engineer\n- student\n- data-engineer\n\n## Level\n\n- intermediate\n\n## Product(s)\n\n- azure-sql-database\n- github\n- vs-code\n- azure-functions\n- azure-app-service-static\n- azure-web-apps\n\n\n## Prerequisites\n- Module: Develop modern applications with built-in capabilities of Azure SQL Database\n- Module: Deploy backend APIs with Azure Functions, Logic Apps, and Azure SQL Database\n\n## Summary\n\nIn the language of your choice (Node.js, Python, or .NET), deploy an Azure Static Web App which combines the power of Azure Functions and Azure Web Apps into one service managed by GitHub Actions. This application surfaces real-time geospatial data stored in an Azure SQL Database.\n\n## Learning objectives\n\n1. Create Vue.js web applications leveraging Azure Static Web Apps\n1. Design APIs in any language and integrate them with Azure SQL Database and Azure Static Web Apps\n1. Forumulate GitHub Actions to build CI/CD for Azure applications and services\n\n\n## Chunk your content into subtasks\n\nIdentify the subtasks of *module title*\n\n| Subtask | What part of the introduction scenario does this subtask satisfy? | How will you assess it: **Exercise or Knowledge check**? | Which learning objective(s) does this help meet? | Does the subtask have enough learning content to justify an entire unit? If not, which other subtask will you combine it with? |\n| ---- | ---- | ---- | ---- | ---- |\n| TODO | TODO | TODO | TODO | TODO |\n| TODO | TODO | TODO | TODO | TODO |\n| TODO | TODO | TODO | TODO | TODO |\n\n## Outline the units\n\n*Add more units as needed for your content*\n\n1. **Introduction**\n\n Provide a scenario of a real-world job-task that shows how the technology is used in practice:\n\n *Catch the bus scenario*\n\n1. **Exercise - Deploy and configure resources**\n\n List the steps which apply the learning content from previous unit:\n\n 1. Fork repo\n 1. Deploy DB and schema\n\n1. **Accelerate app development with Azure Static Web Apps**\n\n List the content that will enable the learner to *subtask*:\n\n - Motivation for Azure static web apps\n - Information needed to accomplish the enabling objective\n - Information needed to accomplish the enabling objective\n - Azure static web apps overview\n - Information needed to accomplish the enabling objective\n - Information needed to accomplish the enabling objective\n - Azure static web apps and the given scenario\n - Information needed to accomplish the enabling objective\n - Information needed to accomplish the enabling objective\n\n **Knowledge check**\n\n What types of questions will test *learning objective*? *[(Knowledge check guidance)](/help/learn/id-guidance-knowledge-check)*\n\n - Multiple choice (4 options) asking about what the purpose of Azure Static Web Apps are\n - Multiple choice (4 options) How GitHub integrations will help Azure Static Web Apps\n\n1. 
**Exercise - Deploy an Azure Static Web App**\n\n List the steps which apply the learning content from previous unit:\n\n 1. Deploy empty static web app but connect to forked repo\n\n1. **Accessing Azure SQL Database with Azure Static Web Apps**\n\n List the content that will enable the learner to *subtask*:\n\n - Scenarios for intergration between the two services\n - Information needed to accomplish the enabling objective\n - Information needed to accomplish the enabling objective\n - Dive into the front end integration\n - Information needed to accomplish the enabling objective\n - Information needed to accomplish the enabling objective\n - Dive into the backend integration\n - Information needed to accomplish the enabling objective\n - Information needed to accomplish the enabling objective\n\n **Knowledge check**\n\n What types of questions will test *learning objective*? *[(Knowledge check guidance)](/help/learn/id-guidance-knowledge-check)*\n\n - Multiple choice (4 options) Question about querying from the app\n - Multiple choice (4 options) Question about returning data from SQL DB (perhaps around JSON support)\n\n1. **Exercise - Configure and deploy Azure Static Web Apps with GitHub Actions**\n\n List the steps which apply the learning content from previous unit:\n\n 1. Configure code for static web app\n 1. Run locally and monitor\n 1. Deploy with GitHub Actions\n\n1. **Summary**\n\n How did you solve the problem in the initial scenario with the knowledge learned in the module? \n \n *Add your summary [(Summary guidance)](/help/learn/id-guidance-module-summary-unit)*\n\n## Notes\n\nNote any additional information that may be beneficial to this content such as links, reference material, etc." }, { "alpha_fraction": 0.724726140499115, "alphanum_fraction": 0.7297539710998535, "avg_line_length": 34.70512771606445, "blob_id": "a51a74668bc506d087b1ef399f8c8d4ed069844a", "content_id": "8d14428d76fc6736aa64379e1e33c4334f3fcbfb", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 5569, "license_type": "permissive", "max_line_length": 318, "num_lines": 156, "path": "/design-docs/module02.md", "repo_name": "hndgy/serverless-full-stack-apps-azure-sql", "src_encoding": "UTF-8", "text": "# Deploy backend APIs with Azure Functions, Logic Apps, and Azure SQL Database\n\nLearning path title: Build serverless, full stack applications in Azure\n\n*Add the working title [(Title guidance)](/help/learn/id-guidance-title)*\n\n## Role(s)\n\n- developer\n- database-administrator\n- solution-architect\n- devops-engineer\n- student\n- data-engineer\n\n## Level\n\n- intermediate\n\n## Product(s)\n\n- azure-sql-database\n- github\n- vs-code\n- azure-functions\n- azure-logic-apps\n\n\n## Prerequisites\n- Module: Develop modern applications with built-in capabilities of Azure SQL Database\n\n## Summary\n\nIn the language of your choice (Node.js, Python, or .NET), deploy an Azure Function that adds real-time data to an Azure SQL Database and triggers an Azure Logic App for notifications when a bus enters a geofence. Leverage GitHub Actions for CI/CD.\n\n## Learning objectives\n\n1. Build backend APIs in Azure\n1. Design Azure Functions in any language and integrate them with Azure SQL Database and Azure Logic Apps\n1. Examine geospatial and JSON support built-in to Azure SQL Database\n\n## Chunk your content into subtasks\n\nIdentify the subtasks of *module title*\n\n| Subtask | What part of the introduction scenario does this subtask satisfy? 
| How will you assess it: **Exercise or Knowledge check**? | Which learning objective(s) does this help meet? | Does the subtask have enough learning content to justify an entire unit? If not, which other subtask will you combine it with? |\n| ---- | ---- | ---- | ---- | ---- |\n| TODO | TODO | TODO | TODO | TODO |\n| TODO | TODO | TODO | TODO | TODO |\n| TODO | TODO | TODO | TODO | TODO |\n\n## Outline the units\n\n*Add more units as needed for your content*\n\n1. **Introduction**\n\n Provide a scenario of a real-world job-task that shows how the technology is used in practice:\n\n *Catch the bus scenario*\n\n1. **Creating APIs with Azure Functions**\n\n List the content that will enable the learner to *subtask*:\n\n - Overview of scenarios that require Azure Functions\n - Information needed to accomplish the enabling objective\n - Information needed to accomplish the enabling objective\n - Overview of Azure Functions\n - Information needed to accomplish the enabling objective\n - Information needed to accomplish the enabling objective\n - Azure Functions for this scenario + integration with SQL DB\n - Information needed to accomplish the enabling objective\n - Information needed to accomplish the enabling objective\n\n **Knowledge check**\n\n What types of questions will test *learning objective*? *[(Knowledge check guidance)](/help/learn/id-guidance-knowledge-check)*\n\n - Multiple choice (4 options) what's supported in Azure Functions\n - Multiple choice (4 options) how to connect Azure Functions and SQL DB\n\n1. **Exercise - Deploy and configure resources**\n\n List the steps which apply the learning content from previous unit:\n\n 1. Fork and Clone the repository\n 1. Deploy Azure SQL Database \n 1. Deploy empty Azure Function\n 1. Configure yaml file and github secrets\n 1. Push and monitor deployment\n\n1. **Build a solution to get bus data and monitor routes**\n\n List the content that will enable the learner to *subtask*:\n\n - Get Real time feed / explain JSON\n - Information needed to accomplish the enabling objective\n - Information needed to accomplish the enabling objective\n - Get monitored routes\n - Information needed to accomplish the enabling objective\n - Information needed to accomplish the enabling objective\n - Process geofences / explain geospatial support\n - Information needed to accomplish the enabling objective\n - Information needed to accomplish the enabling objective\n\n **Knowledge check**\n\n What types of questions will test *learning objective*? *[(Knowledge check guidance)](/help/learn/id-guidance-knowledge-check)*\n\n - Multiple choice (4 options) JSON support in SQL DB\n - Multiple choice (4 options) Geospatial Support in SQL DB\n\n1. **Exercise - Get bus data**\n\n List the steps which apply the learning content from previous unit:\n\n 1. Fill in the Azure Function code (minus the last part)\n 1. Run locally\n 1. Compare with what's seen in Azure SQL Database using ADS\n 1. Deploy with CI/CD\n 1. Monitor in the Azure portal\n\n1. 
**Pushing notifications with Azure Logic Apps**\n\n List the content that will enable the learner to *subtask*:\n\n - Overview of Azure Logic Apps\n - Information needed to accomplish the enabling objective\n - Information needed to accomplish the enabling objective\n - Integrations of Azure Logic Apps + Azure Functions + Azure SQL DB\n - Information needed to accomplish the enabling objective\n - Information needed to accomplish the enabling objective\n - Azure Logic Apps in this scenario\n - Information needed to accomplish the enabling objective\n - Information needed to accomplish the enabling objective\n\n\n1. **Exercise - Deploy Azure Logic App**\n\n List the steps which apply the learning content from previous unit:\n\n 1. Deploy with PowerShell\n 1. Set up outlook connector\n 1. Monitor Azure Function and Outlook to observe results\n\n\n1. **Summary**\n\n How did you solve the problem in the initial scenario with the knowledge learned in the module? \n \n *Add your summary [(Summary guidance)](/help/learn/id-guidance-module-summary-unit)*\n\n## Notes\n\nNote any additional information that may be beneficial to this content such as links, reference material, etc." }, { "alpha_fraction": 0.7343618869781494, "alphanum_fraction": 0.7385544180870056, "avg_line_length": 37.727272033691406, "blob_id": "c009cf54bf4e81807f55fc7f995672a9de8263d6", "content_id": "ae694c7733b20f57ab7f24f992f6a4baa8fbbbe1", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 5963, "license_type": "permissive", "max_line_length": 478, "num_lines": 154, "path": "/design-docs/module04.md", "repo_name": "hndgy/serverless-full-stack-apps-azure-sql", "src_encoding": "UTF-8", "text": "# Automate Node.js, .NET, and Python deployments with GitHub Actions and Azure\n\nLearning path title: Build serverless, full stack applications in Azure\n\n*Add the working title [(Title guidance)](/help/learn/id-guidance-title)*\n\n## Role(s)\n\n- developer\n- database-administrator\n- solution-architect\n- devops-engineer\n- student\n- data-engineer\n\n## Level\n\n- intermediate\n\n## Product(s)\n\n- azure-sql-database\n- github\n- vs-code\n- azure-functions\n- azure-logic-apps\n- azure-app-service-static\n- azure-web-apps\n- azure-devops\n\n\n## Prerequisites\n- Module: Develop modern applications with built-in capabilities of Azure SQL Database\n- Module: Deploy backend APIs with Azure Functions, Logic Apps, and Azure SQL Database\n- Module: Build full stack applications with Azure Static Web Apps and Azure SQL Database\n\n## Summary\n\nIn the language of your choice (Node.js, Python, or .NET), deploy an end-to-end solution to solve the real-world scenario of catching the bus. You will learn how to build a solution that leverages Azure SQL Database, Azure Functions, Azure Static Web Apps, Logic Apps, Visual Studio Code and GitHub Actions. This is the final module in the learning path that puts all of the pieces together, so if there are pieces you want to go deeper into, you can review the earlier modules.\n\n## Learning objectives\n\n1. Build multi-service solutions in Azure which leverage Azure SQL Database\n1. Create web applications quickly by leveraging Azure Static Web Apps\n1. Design APIs in any language with Azure Functions \n1. 
Formulate GitHub Actions to build CI/CD for Azure applications and services\n\n## Chunk your content into subtasks\n\nIdentify the subtasks of *module title*\n\n| Subtask | What part of the introduction scenario does this subtask satisfy? | How will you assess it: **Exercise or Knowledge check**? | Which learning objective(s) does this help meet? | Does the subtask have enough learning content to justify an entire unit? If not, which other subtask will you combine it with? |\n| ---- | ---- | ---- | ---- | ---- |\n| TODO | TODO | TODO | TODO | TODO |\n| TODO | TODO | TODO | TODO | TODO |\n| TODO | TODO | TODO | TODO | TODO |\n\n## Outline the units\n\n*Add more units as needed for your content*\n\n1. **Introduction**\n\n Provide a scenario of a real-world job-task that shows how the technology is used in practice:\n\n *Catch the bus scenario*\n\n1. **Catching the bus with Azure**\n\n List the content that will enable the learner to *subtask*:\n\n - Review of the scenario requirements and the elements of the solution to build the architecture\n - Information needed to accomplish the enabling objective\n - Information needed to accomplish the enabling objective\n - Azure SQL Database\n - Information needed to accomplish the enabling objective\n - Information needed to accomplish the enabling objective\n - Azure Functions + Logic Apps\n - Information needed to accomplish the enabling objective\n - Information needed to accomplish the enabling objective\n - Azure Static Web Apps\n - GitHub Actions (or Azure DevOps alternatives)\n\n **Knowledge check**\n\n What types of questions will test *learning objective*? *[(Knowledge check guidance)](/help/learn/id-guidance-knowledge-check)*\n\n - Multiple choice (4 questions): question about Azure SQL Database in the architecture\n - Multiple choice (4 questions): question about Azure SQL Database in the architecture #2\n\n1. **Exercise - Deploy and configure resources**\n\n List the steps which apply the learning content from previous unit:\n\n 1. Fork repo\n 1. Deploy all resources with scripts\n 1. Configure Logic App connection to Outlook\n\n1. **Diving deeper into GitHub Actions**\n\n List the content that will enable the learner to *subtask*:\n\n - Recap of what's been used in the scenario for GitHub Actions\n - Information needed to accomplish the enabling objective\n - Note that the alternative is Azure Pipelines\n - GitHub Actions for Azure SQL Database\n - SQL Actions (state based)\n - DbUp (migration based)\n - GitHub secrets\n - Information needed to accomplish the enabling objective\n - Information needed to accomplish the enabling objective\n - Configuring yml files and monitoring deployments\n\n **Knowledge check**\n\n What types of questions will test *learning objective*? *[(Knowledge check guidance)](/help/learn/id-guidance-knowledge-check)*\n\n - Multiple choice (4 questions): Difference between state based and migration based\n - Multiple choice (4 questions): Snippet for managing / monitoring yml files\n\n1. **Exercise - Configure GitHub deployments and monitor the solution**\n\n List the steps which apply the learning content from previous unit:\n\n 1. Configure GitHub secrets\n 1. Configure yml and re-deploy\n 1. Monitor deployments in GitHub and portal and actual app\n\n1. 
**Build full stack applications with Azure SQL Database**\n\n List the content that will enable the learner to *subtask*:\n\n - Recap of how Azure SQL Database contributed to the solution\n - Information needed to accomplish the enabling objective\n - Note that alternative is Azure Pipelines\n - Top 5 other developer related capabilities that we want to share with this audience\n\n\n **Knowledge check**\n\n What types of questions will test *learning objective*? *[(Knowledge check guidance)](/help/learn/id-guidance-knowledge-check)*\n\n - Multiple choice (4 questions): Difference between state based and migration based\n - Multiple choice (4 questions): Snippet for managing / monitoring yml files\n\n1. **Summary**\n\n How did you solve the problem in the initial scenario with the knowledge learned in the module? \n \n *Add your summary [(Summary guidance)](/help/learn/id-guidance-module-summary-unit)*\n\n## Notes\n\nNote any additional information that may be beneficial to this content such as links, reference material, etc." }, { "alpha_fraction": 0.6988382339477539, "alphanum_fraction": 0.7012213468551636, "avg_line_length": 33.96875, "blob_id": "73a90bf6bd4e727a2a840a72783352b06ed36ea0", "content_id": "3ca8dfb9a841d6e2636d4065ddc6128b9e9fba41", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 3357, "license_type": "permissive", "max_line_length": 222, "num_lines": 96, "path": "/azure-function/node/GetBusData/index.js", "repo_name": "hndgy/serverless-full-stack-apps-azure-sql", "src_encoding": "UTF-8", "text": "/* eslint-disable no-unused-vars */\n/* eslint-disable no-use-before-define */\nconst fetch = require('node-fetch');\nconst sql = require('mssql');\n\nconst AZURE_CONN_STRING = process.env.AzureSQLConnectionString;\nconst GTFS_RT_FEED = process.env.RealTimeFeedUrl;\nconst LOGIC_APP_URL = process.env.LogicAppUrl;\n\nmodule.exports = async function (context, myTimer) {\n // Get the routes we want to monitor\n const routes = await GetMonitoredRoutes();\n\n // Get the real-time bus location feed\n const feed = await GetRealTimeFeed();\n\n // Filter only the routes we want to monitor\n const buses = feed.entity.filter((e) => routes.includes(parseInt(e.vehicle.trip.route_id)));\n\n context.log(`Received ${feed.entity.length} buses positions, found ${buses.length} buses in monitored routes`);\n\n // Push data to Azure SQL and get the activated geofences\n const activatedGeofences = await ProcessGeoFences(context, buses);\n\n // Send notifications\n // (using 'map' instead of 'forEach' to make sure all calls are awaited: https://advancedweb.hu/how-to-use-async-functions-with-array-foreach-in-javascript/)\n await Promise.all(\n activatedGeofences.map(async (gf) => {\n context.log(\n `Vehicle ${gf.VehicleId}, route ${gf.RouteId}, ${gf.GeoFenceStatus} Geofence ${gf.GeoFence} at ${gf.TimestampUTC} UTC.`,\n );\n await TriggerLogicApp(context, gf);\n }),\n );\n};\n\nasync function GetMonitoredRoutes() {\n const pool = await sql.connect(AZURE_CONN_STRING);\n const queryResult = await pool.request().execute('web.GetMonitoredRoutes');\n const monitoredRutes = JSON.parse(queryResult.recordset[0].MonitoredRoutes);\n if (monitoredRutes == null) return [];\n return monitoredRutes.map((i) => i.RouteId);\n}\n\nasync function GetRealTimeFeed() {\n const response = await fetch(GTFS_RT_FEED);\n const feed = await response.json();\n\n return feed;\n}\n\nasync function ProcessGeoFences(context, buses) {\n const busData = buses.map((e) => 
({\n DirectionId: e.vehicle.trip.direction_id,\n RouteId: e.vehicle.trip.route_id,\n VehicleId: e.vehicle.vehicle.id,\n Position: {\n Latitude: e.vehicle.position.latitude,\n Longitude: e.vehicle.position.longitude,\n },\n TimestampUTC: new Date(e.vehicle.timestamp * 1000),\n }));\n\n const pool = await sql.connect(AZURE_CONN_STRING);\n const queryResult = await pool\n .request()\n .input('payload', sql.NVarChar, JSON.stringify(busData))\n .execute('web.AddBusData');\n\n const geoFences = JSON.parse(queryResult.recordset[0].ActivatedGeoFences);\n if (geoFences == null) return [];\n\n context.log(`Found ${geoFences.length} buses activating a geofence`);\n\n return geoFences;\n}\n\nasync function TriggerLogicApp(context, geofence) {\n const content = {\n value1: geofence.VehicleId,\n value2: geofence.GeoFenceStatus,\n };\n\n context.log(`Calling Logic App webhook for ${geofence.VehicleId}`);\n\n try {\n const response = await fetch(LOGIC_APP_URL, {\n method: 'post',\n body: JSON.stringify(content),\n headers: { 'Content-Type': 'application/json' },\n });\n if (response.ok) { context.log(`[${geofence.VehicleId}/${geofence.DirectionId}/${geofence.GeoFenceId}] WebHook called successfully`); } else context.log(`Error calling Logic App. HTTP Response is: ${response.status}`);\n } catch (err) {\n context.log(`Error calling Logic App. Error is: ${err}`);\n }\n}\n" } ]
14
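Both API implementations in the record above, the C# `ShowBusData` function and the Node.js `GetBusData` timer trigger, lean on the same pattern: execute a stored procedure in Azure SQL Database, treat the single column it returns as a JSON document, and fan notifications out through an HTTP webhook. The following is a minimal Python sketch of that pattern, not code from the repository; `pyodbc` and `requests` are assumptions, and only the `web.AddBusData` procedure and the two app-setting names come from the code above.

```python
# Sketch only: a Python port of the ProcessGeoFences/TriggerLogicApp pair above.
# Assumes an ODBC-style connection string is stored under the same setting name.
import json
import os

import pyodbc
import requests

CONN_STRING = os.environ["AzureSQLConnectionString"]
LOGIC_APP_URL = os.environ["LogicAppUrl"]


def process_geofences(bus_data):
    """Push bus positions to web.AddBusData and return activated geofences."""
    with pyodbc.connect(CONN_STRING) as conn:
        row = conn.execute("EXEC web.AddBusData @payload = ?",
                           json.dumps(bus_data)).fetchone()
    # The procedure returns one row with one JSON column (or NULL).
    return json.loads(row[0]) if row and row[0] else []


def trigger_logic_app(geofence):
    """Fire the Logic App webhook for one activated geofence."""
    payload = {"value1": geofence["VehicleId"], "value2": geofence["GeoFenceStatus"]}
    response = requests.post(LOGIC_APP_URL, json=payload, timeout=10)
    response.raise_for_status()
```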
Udius/lyceumProject
https://github.com/Udius/lyceumProject
af4dc57ce1bccbe110c4ed0c3a83c53e5f93e461
dfe9251478b4fb83213d2bc43ac863ccc57d7f2a
1dba0879ee202ea74bd491a7888cf96cc23f23af
refs/heads/master
2023-03-06T05:28:10.025937
2021-02-15T13:25:33
2021-02-15T13:25:33
335,948,392
0
1
null
2021-02-04T12:33:04
2021-02-04T13:31:14
2021-02-04T13:31:11
Python
[ { "alpha_fraction": 0.44565725326538086, "alphanum_fraction": 0.47138625383377075, "avg_line_length": 27.251100540161133, "blob_id": "519b66841871514513a6d6d6c36c9a18473bb3e4", "content_id": "bd31465ca41c8455a11677740f93f48fbc8d5e23", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6564, "license_type": "no_license", "max_line_length": 139, "num_lines": 227, "path": "/main.py", "repo_name": "Udius/lyceumProject", "src_encoding": "UTF-8", "text": "import os, sys\nimport pygame, requests\n\nfrom pygame.locals import *\n\nfrom all_data import *\nfrom UI import *\n\n\ndef terminate():\n    pygame.quit()\n    if os.path.isfile(map_file):\n        os.remove(map_file)\n    sys.exit(1)\n\n\ndef checkEvent():\n    global zoom, lastBackspaceClick\n    key['mouse'] = pygame.mouse.get_pressed()\n\n    image = None\n    for event in pygame.event.get():\n        if event.type == QUIT:\n            terminate()\n\n        elif event.type == KEYDOWN:\n            if event.key == K_UP:\n                key['top'] = True\n            elif event.key == K_DOWN:\n                key['bottom'] = True\n            elif event.key == K_LEFT:\n                key['left'] = True\n            elif event.key == K_RIGHT:\n                key['right'] = True\n            elif event.key == K_1:\n                key['1'] = True\n            elif event.key == K_2:\n                key['2'] = True\n            elif event.key == K_3:\n                key['3'] = True\n            elif event.key == K_LCTRL:\n                key['lCtrl'] = True\n            elif event.key == K_v:\n                key['v'] = True\n\n            if event.key == K_BACKSPACE:\n                key['backspace'] = True\n            elif event.unicode.isalnum() or event.unicode in ' .,;:\"<>':\n                '''\n                Used in UI.tEdit\n                '''\n                textTyping[0] += event.unicode\n\n        elif event.type == KEYUP:\n            if event.key == K_UP:\n                key['top'] = False\n            elif event.key == K_DOWN:\n                key['bottom'] = False\n            elif event.key == K_LEFT:\n                key['left'] = False\n            elif event.key == K_RIGHT:\n                key['right'] = False\n            elif event.key == K_1:\n                key['1'] = False\n            elif event.key == K_2:\n                key['2'] = False\n            elif event.key == K_3:\n                key['3'] = False\n            elif event.key == K_LCTRL:\n                key['lCtrl'] = False\n            elif event.key == K_v:\n                key['v'] = False\n            elif event.key == K_BACKSPACE:\n                key['backspace'] = False\n\n        elif event.type == MOUSEBUTTONDOWN and 0 < zoom < 20:\n            if event.button == 4:\n                if zoom < 0.01:\n                    zoom -= 0.0005\n                elif zoom < 0.1:\n                    zoom -= 0.01\n                elif zoom < 0.3:\n                    zoom -= 0.05\n                elif zoom < 0.6:\n                    zoom -= 0.1\n                elif zoom < 2:\n                    zoom -= 0.3\n\n                image = getMap()\n\n            elif event.button == 5:\n                if zoom < 0.01:\n                    zoom += 0.0005\n                elif zoom < 0.1:\n                    zoom += 0.01\n                elif zoom < 0.3:\n                    zoom += 0.05\n                elif zoom < 0.6:\n                    zoom += 0.1\n                elif zoom < 2:\n                    zoom += 0.3\n\n                image = getMap()\n\n    return image\n\n\ndef getMap():\n    cords = ','.join(mapCords)\n    map_request = f\"http://static-maps.yandex.ru/1.x/?ll={cords}&spn={str(zoom)},{str(zoom)}&l={mapType}&size=650,450\"\n    response = requests.get(map_request)\n\n    if not response:\n        print(\"Request execution failed:\")\n        print(map_request)\n        print(\"Http status:\", response.status_code, \"(\", response.reason, \")\")\n        terminate()\n    else:\n\n        with open(map_file, \"wb\") as file:\n            file.write(response.content)\n\n        image = pygame.image.load(map_file)\n\n    return image\n\n\ndef getMapForCoord(name_of_txt):\n    geocoder_request = f\"http://geocode-maps.yandex.ru/1.x/?apikey=40d1649f-0493-4b70-98ba-98533de7710b&geocode={name_of_txt}1&format=json\"\n    response = requests.get(geocoder_request)\n    if response:\n        try:\n            cords = response.json()[\"response\"][\"GeoObjectCollection\"][\"featureMember\"][0][\"GeoObject\"][\"Point\"][\"pos\"]\n        except:\n            cords = None\n            print('[ERROR] - wrong data format')\n    else:\n        print(\"Request execution failed:\")\n        print(geocoder_request)\n        print(\"Http status:\", response.status_code, \"(\", response.reason, \")\")\n\n    return cords\n\n\nui = UI()\n\nbtn = ui.newButton('search', 'Search', (WIDTH - 130, HEIGHT - 50), size=(110, 35), rectColor=(40, 40, 40))\nbtn.setFont(fonts['arial']['28'])\n\ntEdit = ui.newTextEdit('searchData', (WIDTH - 460, HEIGHT - 50), size=(310, 35))\ntEdit.setFont(fonts['arial']['22'])\n\nimage = getMap()\nwhile True:\n    newImage = checkEvent()\n\n    '''\n    Handle the keyboard\n    '''\n    speed = [0, 0]\n    if key['left']:\n        speed[0] -= 0.5 * zoom\n    if key['right']:\n        speed[0] += 0.5 * zoom\n    if key['top']:\n        speed[1] += 0.5 * zoom\n    if key['bottom']:\n        speed[1] -= 0.5 * zoom\n\n    if key['lCtrl'] and key['1']:\n        mapType = 'map'\n        image = getMap()\n        key['1'] = False\n    elif key['lCtrl'] and key['2']:\n        mapType = 'sat'\n        image = getMap()\n        key['2'] = False\n    elif key['lCtrl'] and key['3']:\n        mapType = 'sat,trf,skl'\n        image = getMap()\n        key['3'] = False\n\n    '''\n    Handle the interface\n    '''\n\n    clickedButton = ui.update()\n    if clickedButton == 'search':\n        inputData = ''.join(tEdit.text.split())\n\n        rightData = True\n        if len(inputData.split(',')) != 2:\n            rightData = False\n        for i in inputData.split(','):\n            if i.count('.') > 1:\n                rightData = False\n            for j in i.split('.'):\n                if not j.isdigit():\n                    rightData = False\n\n        if not rightData:\n            cords = getMapForCoord(inputData)\n            if cords:\n                result = cords.split()\n                if result is not None:\n                    mapCords = result\n                    image = getMap()\n            print('Search button clicked')\n        else:\n            mapCords = inputData.split(',')\n            image = getMap()\n\n    '''\n    Request a new map if we are moving, or if there is no map at all\n    '''\n    if speed != [0, 0] or newImage is not None:\n        mapCords[0] = str(float(mapCords[0]) + speed[0])\n        mapCords[1] = str(float(mapCords[1]) + speed[1])\n        image = getMap()\n\n    # Drawing\n    screen.blit(image, (0, 0))\n    ui.draw(screen)\n\n    # Flip & wait\n    pygame.display.update()\n    clock.tick(FPS)\n" }, { "alpha_fraction": 0.7343618869781494, "alphanum_fraction": 0.7343618869781494, "avg_line_length": 23.636363983154297, "blob_id": "18e554f152b570cb94f8bcbe37f7e166c1f23e45", "content_id": "05c665ae1aa2677eb819d3427ceccdd3749f21b6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 430, "license_type": "no_license", "max_line_length": 76, "num_lines": 11, "path": "/README.txt", "repo_name": "Udius/lyceumProject", "src_encoding": "UTF-8", "text": "\"main.py\" - the main loop and the program logic\n\n\"all_data.py\" - all the core data (variables such as WIDTH and HEIGHT)\n\n\"all_classes\" - stores all the classes\n\n\n-----< Controls >-----\n\nzoom - the mouse wheel\nMoving around the map - the arrow keys\n" }, { "alpha_fraction": 0.5451586842536926, "alphanum_fraction": 0.5508543252944946, "avg_line_length": 32.216217041015625, "blob_id": "1b3567a5436cd1fe8e5a6d25df318748deb7f608", "content_id": "6a51ecc253936901ec36b380440ec16c45934158", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1229, "license_type": "no_license", "max_line_length": 94, "num_lines": 37, "path": "/all_classes.py", "repo_name": "Udius/lyceumProject", "src_encoding": "UTF-8", "text": "from all_data import *\n\n\nclass Button(pygame.sprite.Sprite):\n    def __init__(self, name, cords):\n        super().__init__(all_UI)\n        self.absImage = ui_images[name]\n        self.image = self.absImage\n        self.rect = self.image.get_rect()\n        self.rect.center = cords\n        self.pressed = False\n        self.name = name\n\n    def update(self):\n        mPos = pygame.mouse.get_pos()\n        mousePress = pygame.mouse.get_pressed()[0]\n        if self.rect.collidepoint(mPos) and (mousePress or self.pressed):\n            self.pressed = True\n            oldPos = self.rect.center\n            w, h, = self.absImage.get_width(), self.absImage.get_height()\n            self.image = pygame.transform.scale(self.absImage, (int(w * 0.95), int(h * 0.95)))\n            self.rect = self.image.get_rect()\n            self.rect.center = oldPos\n            if not mousePress:\n                self.pressed = False\n                return True\n        else:\n            self.pressed = False\n            oldPos = self.rect.center\n            self.image = self.absImage\n            self.rect = self.image.get_rect()\n            self.rect.center = oldPos\n\n        return False\n\n    def move(self, x, y):\n        self.rect.center = x, y\n" }, { "alpha_fraction": 0.5413371920585632, "alphanum_fraction": 0.5708123445510864, "avg_line_length": 19.159420013427734, "blob_id": "d09e35ce38e85f1afead8329c7437acf5d8cd833", "content_id": "2289503cf96a93157b4c7be325f84fbc4a21f0b4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1422, "license_type": "no_license", "max_line_length": 52, "num_lines": 69, "path": "/all_data.py", "repo_name": "Udius/lyceumProject", "src_encoding": "UTF-8", "text": "import pygame\nimport os, sys\n\n\ndef load_image(name, colorkey=None):\n    fullname = os.path.join('data\\\\images', name)\n    if not os.path.isfile(fullname):\n        print(f\"[ERROR] No such file: '{fullname}'\")\n        sys.exit(1)  # terminate() lives in main.py and is not importable here\n    image = pygame.image.load(fullname)\n\n    if colorkey is not None:\n        image = image.convert()\n        if colorkey == -1:\n            colorkey = image.get_at((0, 0))\n            image.set_colorkey(colorkey)\n        else:\n            image.set_colorkey(colorkey)\n    image = image.convert_alpha()\n\n    return image\n\n\nWIDTH = 650\nHEIGHT = 450\nFPS = 60\n\npygame.init()\nscreen = pygame.display.set_mode((WIDTH, HEIGHT))\nclock = pygame.time.Clock()\npygame.display.set_caption('Lyceum project')\n\nmapCords = ['37.6156', '55.7522']\nmapType = 'map'\nspeed = [0, 0]\nzoom = 1\n\ntexts = list()\ntextTyping = ['']\n\n# Arrow keys: True while the key is pressed\nkey = {\n    'top': False,\n    'left': False,\n    'right': False,\n    'bottom': False,\n    '1': False,\n    '2': False,\n    '3': False,\n    'lCtrl': False,\n    'backspace': False,\n    'v': False,\n    'mouse': (False, False, False)\n}\n\nui_images = {\n    'search': load_image('search.png')\n}\n\n# ---< FILES >---\nmap_file = \"map.png\"\n\nfonts = {\n    'arial': {\n        '20': pygame.font.SysFont('arial', 20),\n        '22': pygame.font.SysFont('arial', 22),\n        '28': pygame.font.SysFont('arial', 28)\n    }\n}\n" }, { "alpha_fraction": 0.5504925847053528, "alphanum_fraction": 0.5687416195869446, "avg_line_length": 30.565370559692383, "blob_id": "42d9e27ea3a1e0a7fe2c00786f9fce1483223971", "content_id": "8414388c5e7348e5e87498eaf96e317b8a3169f3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8932, "license_type": "no_license", "max_line_length": 147, "num_lines": 283, "path": "/UI.py", "repo_name": "Udius/lyceumProject", "src_encoding": "UTF-8", "text": "import pygame\nfrom pyperclip import paste\n\nfrom pygame.locals import *\n\nfrom all_data import *\n\n\ndef makeBrighter(mainColor, extraColor):\n    r, g, b = mainColor\n\n    if r + extraColor[0] <= 255:\n        r += extraColor[0]\n    else:\n        r = 255\n\n    if g + extraColor[1] <= 255:\n        g += extraColor[1]\n    else:\n        g = 255\n\n    if b + extraColor[2] <= 255:\n        b += extraColor[2]\n    else:\n        b = 255\n\n    newColor = (r, g, b)\n    return newColor\n\n\ndef createText(text, cords, 
font=fonts['arial']['20'], color=(0, 0, 0), name=None, textDict=texts, alignment=True):\n if name is None:\n name = text\n x, y = cords\n\n if alignment is True or alignment == 'horizontal':\n x -= font.size(text)[0] // 2\n if alignment is True or alignment == 'vertical':\n y -= font.size(text)[1] // 2\n\n txt = font.render(text, False, color)\n if textDict is not None:\n textDict[name] = [txt, (x, y)]\n else:\n return [txt, (x, y)]\n\n\nclass UI:\n def __init__(self):\n # {objectName: object}\n self.buttons = dict()\n self.textEdits = dict()\n self.labels = list()\n\n def update(self):\n for btn in list(self.buttons.values()):\n clickedButton = btn.update()\n if clickedButton is not None:\n return clickedButton\n\n for tEdit in list(self.textEdits.values()):\n clickedButton = tEdit.update()\n if clickedButton is not None:\n return clickedButton\n\n def draw(self, surf):\n for btn in list(self.buttons.values()):\n pygame.draw.rect(surf, btn.rectColor, btn.rect)\n pygame.draw.rect(surf, (0, 0, 0), btn.rect, width=2)\n surf.blit(btn.textData[0], btn.textData[1])\n\n for tEdit in list(self.textEdits.values()):\n pygame.draw.rect(surf, tEdit.rectColor, tEdit.rect)\n pygame.draw.rect(surf, (0, 0, 0), tEdit.rect, width=3)\n surf.blit(tEdit.textData[0], tEdit.textData[1])\n\n def newButton(self, name, text='', pos=(0, 0), size=(90, 30), rectColor=(0, 0, 0)):\n btn = Button(name, text, pos, size, rectColor=rectColor)\n self.buttons[name] = btn\n return btn\n\n def newTextEdit(self, name, pos=(0, 0), size=(90, 30), rectColor=(170, 170, 170)):\n tEdit = TextEdit(name, pos, size, rectColor)\n self.textEdits[name] = tEdit\n return tEdit\n\n def clear(self):\n self.buttons = dict()\n self.textEdits = dict()\n\n\nclass Button(pygame.sprite.Sprite):\n def __init__(self, name, text, pos, size, rectColor=(0, 0, 0)):\n super().__init__()\n self.name = name\n self.text = text\n self.pos = pos\n self.size = size\n\n self.textColor = (255, 255, 255)\n self.constRectColor = rectColor\n self.rectColor = rectColor\n\n self.font = fonts['arial']['20']\n self.pressed = self.chosen = False\n\n self.rect = pygame.Rect(pos, size)\n self.rect.move(pos[0], pos[1])\n\n txtPos = (self.rect.center[0], self.rect.center[1])\n self.textData = createText(self.text, txtPos, self.font, self.textColor, name=self.text + 'Button', textDict=None)\n\n def update(self):\n mx, my = pygame.mouse.get_pos()\n leftClick = key['mouse'][0]\n\n # ___ Click's logic ___\n if self.pressed and self.chosen and not leftClick:\n key['mouse'] = (False, False, False)\n self.pressed = self.chosen = False\n return self.name\n\n if self.rect.collidepoint(mx, my):\n if leftClick:\n self.pressed = True\n\n self.chosen = True\n\n else:\n self.chosen = False\n\n # ___ Button img scale ___\n if self.pressed and self.chosen:\n newColor = makeBrighter(self.constRectColor, (40, 30, 30))\n self.rectColor = newColor\n\n elif self.chosen:\n newColor = makeBrighter(self.constRectColor, (15, 15, 15))\n self.rectColor = newColor\n\n else:\n self.rectColor = self.constRectColor\n\n def setText(self, text, color=(255, 255, 255)):\n self.text = text\n self.textColor = color\n\n txtPos = (self.rect.center[0], self.rect.center[1])\n self.textData = createText(self.text, txtPos, self.font, self.textColor, name=self.text + 'Button', textDict=None)\n\n def setFont(self, font):\n self.font = font\n\n txtPos = (self.rect.center[0], self.rect.center[1])\n self.textData = createText(self.text, txtPos, self.font, self.textColor, name=self.text + 'Button', textDict=None)\n\n def 
move(self, x, y, moveType='topleft'):\n self.pos = (x, y)\n\n # topleft, topright, center etc.\n if moveType == 'topleft':\n self.rect.topleft = self.pos\n elif moveType == 'topright':\n self.rect.topright = self.pos\n elif moveType == 'bottomleft':\n self.rect.bottomleft = self.pos\n elif moveType == 'bottomright':\n self.rect.bottomright = self.pos\n elif moveType == 'center':\n self.rect.center = self.pos\n\n txtPos = (self.rect.center[0], self.rect.center[1])\n self.textData = createText(self.text, txtPos, self.font, self.textColor, name=self.text + 'Button', textDict=None)\n\n\nclass TextEdit(pygame.sprite.Sprite):\n def __init__(self, name, pos, size, rectColor):\n super().__init__()\n self.name = name\n self.pos = pos\n self.size = size\n self.text = ''\n\n self.textColor = (0, 0, 0)\n self.constRectColor = rectColor\n self.rectColor = rectColor\n\n self.font = fonts['arial']['20']\n self.selected = False\n\n self.rect = pygame.Rect(pos, size)\n\n txtPos = (self.rect.left + 6, self.rect.center[1])\n self.textData = createText(self.text, txtPos, self.font, self.textColor, name=self.text + 'tEdit', textDict=None, alignment='vertical')\n\n def update(self):\n global textTyping\n\n mx, my = pygame.mouse.get_pos()\n leftClick = key['mouse'][0]\n\n # ___ Click's logic ___\n if self.rect.collidepoint(mx, my) and leftClick:\n if not self.selected:\n textTyping[0] = self.text\n self.selected = True\n\n # ___ Button scale ___\n if self.selected:\n self.typingText()\n newColor = makeBrighter(self.constRectColor, (60, 60, 60))\n self.rectColor = newColor\n else:\n self.rectColor = self.constRectColor\n\n def typingText(self):\n global lastBackspaceClick\n if self.rect.width - self.font.size(textTyping[0])[0] >= 14:\n self.text = textTyping[0]\n\n txtPos = (self.rect.left + 6, self.rect.center[1])\n self.textData = createText(self.text, txtPos, self.font, self.textColor, name=self.text + 'tEdit', textDict=None, alignment='vertical')\n else:\n textTyping[0] = self.text\n\n if key['lCtrl'] and key['v']:\n self.paste()\n\n if key['backspace']:\n now = pygame.time.get_ticks() // 10\n if now - lastBackspaceClick >= 10:\n lastBackspaceClick = pygame.time.get_ticks() // 10\n textTyping[0] = textTyping[0][:-1]\n\n def setText(self, text, color=(0, 0, 0)):\n self.text = text\n self.textColor = color\n\n txtPos = (self.rect.left + 6, self.rect.center[1])\n self.textData = createText(self.text, txtPos, self.font, self.textColor, name=self.text + 'tEdit', textDict=None, alignment='vertical')\n\n def setFont(self, font):\n self.font = font\n\n txtPos = (self.rect.left + 6, self.rect.center[1])\n self.textData = createText(self.text, txtPos, self.font, self.textColor, name=self.text + 'tEdit', textDict=None, alignment='vertical')\n\n def move(self, x, y, moveType='topleft'):\n self.pos = (x, y)\n\n # topleft, topright, center etc.\n if moveType == 'topleft':\n self.rect.topleft = self.pos\n elif moveType == 'topright':\n self.rect.topright = self.pos\n elif moveType == 'bottomleft':\n self.rect.bottomleft = self.pos\n elif moveType == 'bottomright':\n self.rect.bottomright = self.pos\n elif moveType == 'center':\n self.rect.center = self.pos\n\n txtPos = (self.rect.center[0], self.rect.center[1])\n self.textData = createText(self.text, txtPos, self.font, self.textColor, name=self.text + 'tEdit', textDict=None)\n\n def clear(self):\n global textTyping\n\n self.text = ''\n textTyping[0] = ''\n\n def paste(self):\n global textTyping\n key['lCtrl'] = False\n key['v'] = False\n\n txt = paste()\n if self.rect.width 
- self.font.size(txt)[0] >= 14:\n self.text = txt\n textTyping[0] = txt\n\n\nlastBackspaceClick = pygame.time.get_ticks() // 10" } ]
5
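The search handler in main.py above decides whether the text box holds a raw "lon,lat" pair or a place name by mutating a `rightData` flag inside two nested loops. The same rule can be stated as one small predicate. The following is a sketch with equivalent logic, not code from the repository:

```python
# Sketch: equivalent to main.py's rightData check. Accepts strings made of
# exactly two comma-separated parts, each built only from digits and at most
# one dot (so negative coordinates are rejected, as in the original).
def looks_like_coords(raw):
    parts = ''.join(raw.split()).split(',')
    if len(parts) != 2:
        return False
    for part in parts:
        if part.count('.') > 1:
            return False
        if not all(chunk.isdigit() for chunk in part.split('.')):
            return False
    return True


assert looks_like_coords('37.6156, 55.7522')
assert not looks_like_coords('Moscow')
```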
haiyanzzz/Questionnaire
https://github.com/haiyanzzz/Questionnaire
36218386a438f676b4fc74525379bcafab56b7c5
559e0cd20c6e8a53728a10e88c645ccc93471eb7
9388d8d0e050fdf963e8efed7de57aab335fd39d
refs/heads/master
2021-05-06T11:20:03.287618
2017-12-14T08:22:34
2017-12-14T08:22:34
114,222,786
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.8275862336158752, "alphanum_fraction": 0.8333333134651184, "avg_line_length": 33.79999923706055, "blob_id": "fa7fb63ac190254555d6476a330c136ae830484c", "content_id": "b953dc3fd12b61fb71e90f61b17f087cf7810efd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 348, "license_type": "no_license", "max_line_length": 41, "num_lines": 10, "path": "/app01/admin.py", "repo_name": "haiyanzzz/Questionnaire", "src_encoding": "UTF-8", "text": "from django.contrib import admin\nfrom app01 import models\n# Register your models here.\nadmin.site.register(models.Student)\nadmin.site.register(models.ClassList)\nadmin.site.register(models.Questions)\nadmin.site.register(models.UserInfo)\nadmin.site.register(models.Answer)\nadmin.site.register(models.Questionnaire)\nadmin.site.register(models.Option)\n" }, { "alpha_fraction": 0.48813867568969727, "alphanum_fraction": 0.5127737522125244, "avg_line_length": 23.29787254333496, "blob_id": "748997342da16a59f1894640584b9b23aaf690f9", "content_id": "c941922ec9a1c34f74c8a64", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1200, "license_type": "no_license", "max_line_length": 80, "num_lines": 47, "path": "/测试.py", "repo_name": "haiyanzzz/Questionnaire", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n# If the fetched data is not in a structure that can be displayed directly\n# Method 1\nuser_list = [\n    {\"id\":222,\"name\":\"haiyan\",\"age\":33},\n    {\"id\":2,\"name\":\"zzzzz\",\"age\":13}\n]\nnew_user_list = []\nfor item in user_list:\n    item[\"age\"] = item[\"id\"]+item[\"age\"]\n    new_user_list.append(item[\"age\"])\n# print(new_user_list)\n# print(user_list)\n\n# Method 2: use an iterator\ndef test():\n    user_list = [\n        {\"id\":222,\"name\":\"haiyan\",\"age\":33},\n        {\"id\":2,\"name\":\"zzz\",\"age\":13}\n    ]\n    for item in user_list:\n        yield {\"id\":item[\"id\"],\"name\":item[\"name\"],\"age\":item[\"id\"]+item[\"age\"]}\nobj = test()\n# print(obj.__next__())\n# print(next(obj))\n\n# Method 3: implement it with a class, __iter__ and yield\nclass Foo(object):\n    def __init__(self,arg):\n        self.arg = arg\n    def __iter__(self):\n        for item in self.arg:\n            yield item\n            yield {\"age\":item[\"age\"]+item[\"id\"]}\ndef test():\n    user_list = [\n        {\"id\": 222, \"name\": \"haiyan\", \"age\": 33},\n        {\"id\": 2, \"name\": \"zzz\", \"age\": 13}\n    ]\n    obj = Foo(user_list) # instantiating the class immediately calls __init__\n    for i in obj:\n        print(i)\ntest()\n\nl = [1,2]\nprint(len(l))\n\n" }, { "alpha_fraction": 0.5254833102226257, "alphanum_fraction": 0.5694200396537781, "avg_line_length": 22.70833396911621, "blob_id": "0582e6ed121dfac4bb521d4680d12db66c5015f2", "content_id": "4e8eaa8f02525e55e7aeca6f18a384cf3014efc7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 577, "license_type": "no_license", "max_line_length": 71, "num_lines": 24, "path": "/app01/migrations/0002_auto_20171204_2035.py", "repo_name": "haiyanzzz/Questionnaire", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n# Generated by Django 1.11.6 on 2017-12-04 12:35\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n    dependencies = [\n        ('app01', '0001_initial'),\n    ]\n\n    operations = [\n        migrations.RenameModel(\n            old_name='Ansower',\n            new_name='Answer',\n        ),\n        migrations.AlterField(\n            model_name='questions',\n            name='caption',\n            field=models.CharField(max_length=32, verbose_name='问题题目'),\n        ),\n    ]\n" }, { "alpha_fraction": 
0.6370871663093567, "alphanum_fraction": 0.6462395787239075, "avg_line_length": 31.649351119995117, "blob_id": "4a146b9a716885161f56fc91e0f84bdeebd0ad92", "content_id": "0d92a7f26d56fd0674cb0d917e65d9a2106fd21b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2833, "license_type": "no_license", "max_line_length": 87, "num_lines": 77, "path": "/app01/models.py", "repo_name": "haiyanzzz/Questionnaire", "src_encoding": "UTF-8", "text": "from django.db import models\n\n# Create your models here.\nclass UserInfo(models.Model):\n '''员工表'''\n username = models.CharField(max_length=64,verbose_name=\"用户名\")\n password = models.CharField(max_length=32,verbose_name=\"用户密码\")\n def __str__(self):\n return self.username\n class Meta:\n verbose_name_plural=\"员工表\"\n\nclass ClassList(models.Model):\n '''班级表'''\n title = models.CharField(max_length=32,verbose_name=\"班级名\")\n def __str__(self):\n return self.title\n class Meta:\n verbose_name_plural = \"班级表\"\n\nclass Student(models.Model):\n '''学生表'''\n name = models.CharField(max_length=32,verbose_name=\"学生姓名\")\n password = models.CharField(max_length=32,verbose_name=\"学生密码\")\n cls = models.ForeignKey(to=\"ClassList\",verbose_name=\"所属班级\")\n def __str__(self):\n return self.name\n\n class Meta:\n verbose_name_plural = \"学生表\"\nclass Questionnaire(models.Model):\n '''问卷表'''\n title = models.CharField(max_length=32,verbose_name=\"问卷名\")\n cls = models.ForeignKey(to=\"ClassList\",verbose_name=\"问卷班级\")\n create_user = models.ForeignKey(to=\"UserInfo\",verbose_name=\"创建问卷的用户\")\n def __str__(self):\n return self.title\n\n class Meta:\n verbose_name_plural = \"问卷表\"\nclass Questions(models.Model):\n '''问卷问题表'''\n caption = models.CharField(max_length=32,verbose_name=\"问题题目\")\n type_choices = (\n (1,\"打分\"),\n (2,\"单选\"),\n (3,\"评价\")\n )\n question_type = models.IntegerField(choices=type_choices,verbose_name=\"问题类型\")\n questionnaire = models.ForeignKey(to=\"Questionnaire\",default=1,verbose_name=\"所属问卷\")\n def __str__(self):\n return self.caption\n\n class Meta:\n verbose_name_plural = \"问卷问题表\"\nclass Answer(models.Model):\n '''问卷回答表''' #谁什么时候对那个问题作答了\n student = models.ForeignKey(to=\"Student\",verbose_name=\"所属学生\")\n queston = models.ForeignKey(to=\"Questions\",verbose_name=\"所属问题\")\n val = models.IntegerField(null=True,blank=True,verbose_name=\"数字答案\")\n content = models.CharField(max_length=255,null=True,blank=True,verbose_name=\"文本答案\")\n def __str__(self):\n return self.content\n\n class Meta:\n verbose_name_plural = \"问卷回答表\"\n\nclass Option(models.Model):\n '''问卷单选题的选项表'''\n name = models.CharField(verbose_name=\"选项名称\",max_length=32)\n score = models.IntegerField(verbose_name=\"选项对应的分值\")\n question_id = models.ForeignKey(to=\"Questions\",verbose_name=\"所属问题\")\n def __str__(self):\n return str(self.score)\n\n class Meta:\n verbose_name_plural = \"问卷单选题的选项表\"" }, { "alpha_fraction": 0.6734374761581421, "alphanum_fraction": 0.676562488079071, "avg_line_length": 29.5238094329834, "blob_id": "1ebecb5254618aebbda7df03621c8d29f47dbfe9", "content_id": "8670a6d53b0eb2e2436ed6d97b2c528c80c11c64", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 664, "license_type": "no_license", "max_line_length": 58, "num_lines": 21, "path": "/app01/views.py", "repo_name": "haiyanzzz/Questionnaire", "src_encoding": "UTF-8", "text": "from django.db.models.aggregates import Count\nfrom django.shortcuts import render\nfrom app01 import models\n# Create 
your views here.\ndef index(request):\n Questionnaire_obj = models.Questionnaire.objects.all()\n #查询每一个班级的学生个数\n class_obj = models.ClassList.objects.all()\n li = []\n for i in class_obj:\n li.append(i.student_set.all().count())\n answer_obj = models.Answer.objects.all()\n l = []\n for i in answer_obj:\n # print(i.student_id)\n l.append(i.student_id)\n # L=len(l)\n return render(request,\"index.html\",locals())\n\ndef questionedit(request):\n return render(request,\"questionedit.html\")" }, { "alpha_fraction": 0.5236647725105286, "alphanum_fraction": 0.5343030691146851, "avg_line_length": 39.76106262207031, "blob_id": "48fb0da457fb78d16bb45fafc84b5ea417394bd0", "content_id": "040153a22350c63d8b3a4f228554f43f6e60a4dd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4822, "license_type": "no_license", "max_line_length": 131, "num_lines": 113, "path": "/app01/migrations/0001_initial.py", "repo_name": "haiyanzzz/Questionnaire", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n# Generated by Django 1.11.6 on 2017-12-04 12:06\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='Ansower',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('val', models.IntegerField(blank=True, null=True, verbose_name='数字答案')),\n ('content', models.CharField(blank=True, max_length=255, null=True, verbose_name='文本答案')),\n ],\n options={\n 'verbose_name_plural': '问卷回答表',\n },\n ),\n migrations.CreateModel(\n name='ClassList',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('title', models.CharField(max_length=32, verbose_name='班级名')),\n ],\n options={\n 'verbose_name_plural': '班级表',\n },\n ),\n migrations.CreateModel(\n name='Option',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('score', models.IntegerField(verbose_name='选项对应的分值')),\n ],\n options={\n 'verbose_name_plural': '问卷单选题的选项表',\n },\n ),\n migrations.CreateModel(\n name='Questionnaire',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('title', models.CharField(max_length=32, verbose_name='问卷名')),\n ('cls', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='app01.ClassList', verbose_name='问卷班级')),\n ],\n options={\n 'verbose_name_plural': '问卷表',\n },\n ),\n migrations.CreateModel(\n name='Questions',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('caption', models.CharField(max_length=32, verbose_name='用户密码')),\n ('question_type', models.IntegerField(choices=[(1, '打分'), (2, '单选'), (3, '评价')], verbose_name='问题类型')),\n ],\n options={\n 'verbose_name_plural': '问卷问题表',\n },\n ),\n migrations.CreateModel(\n name='Student',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('name', models.CharField(max_length=32, verbose_name='学生姓名')),\n ('password', models.CharField(max_length=32, verbose_name='学生密码')),\n ('cls', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='app01.ClassList', verbose_name='所属班级')),\n ],\n options={\n 'verbose_name_plural': '学生表',\n },\n ),\n 
migrations.CreateModel(\n name='UserInfo',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('username', models.CharField(max_length=64, verbose_name='用户名')),\n ('password', models.CharField(max_length=32, verbose_name='用户密码')),\n ],\n options={\n 'verbose_name_plural': '员工表',\n },\n ),\n migrations.AddField(\n model_name='questionnaire',\n name='create_user',\n field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='app01.UserInfo', verbose_name='创建问卷的用户'),\n ),\n migrations.AddField(\n model_name='option',\n name='question_id',\n field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='app01.Questions', verbose_name='所属问题'),\n ),\n migrations.AddField(\n model_name='ansower',\n name='queston',\n field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='app01.Questions', verbose_name='所属问题'),\n ),\n migrations.AddField(\n model_name='ansower',\n name='student',\n field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='app01.Student', verbose_name='所属学生'),\n ),\n ]\n" } ]
6
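views.py in the record above imports `Count` but never uses it; instead it counts students per class with one query per `ClassList` row (`i.student_set.all().count()` inside a loop). The imported aggregate can collapse that into a single query. A sketch, assuming the `app01` models defined above:

```python
# Sketch only, not repository code. Student.cls is a ForeignKey to ClassList,
# so the reverse relation is addressable as 'student' in an annotation.
from django.db.models import Count

from app01 import models


def class_sizes():
    """One query: each ClassList row annotated with its student count."""
    classes = models.ClassList.objects.annotate(num_students=Count('student'))
    return [(c.title, c.num_students) for c in classes]
```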
Kalebu/pycomm3
https://github.com/Kalebu/pycomm3
7f9d78e9e984118abbf9857941d79614a13d1977
e2d835991ff3ac0df1c33e9f20655fd3827cd550
71caa1de30e0d7b06da7ba85eaab9d38732ea6ef
refs/heads/master
2023-02-15T19:54:25.929647
2020-12-10T14:25:20
2020-12-10T14:25:20
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6237218976020813, "alphanum_fraction": 0.6257668733596802, "avg_line_length": 23.399999618530273, "blob_id": "ab2d2a300f7c99c8d3ab92bc6a7ac8d2577eeea0", "content_id": "d25baa8cfe37452e562dc8f11e97fc5b36b64264", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 489, "license_type": "permissive", "max_line_length": 94, "num_lines": 20, "path": "/tests/test_misc.py", "repo_name": "Kalebu/pycomm3", "src_encoding": "UTF-8", "text": "import datetime\n\n\ndef test_get_time(plc):\n    time = plc.get_plc_time()\n    assert time\n    assert time.value['string'] == time.value['datetime'].strftime('%A, %B %d, %Y %I:%M:%S%p')\n\n\ndef test_set_time(plc):\n    assert plc.set_plc_time()\n\n\ndef test_get_module_info(plc):\n    info = plc.get_module_info(0)\n    assert info\n    assert info['vendor']\n    assert info['product_code']\n    assert f\"{info['version_major']}.{info['version_minor']}\" == info['revision']\n    assert info['serial']\n\n" }, { "alpha_fraction": 0.469696968793869, "alphanum_fraction": 0.6060606241226196, "avg_line_length": 10.083333015441895, "blob_id": "db810902f8b7cfaa069a8d33d7b300be68d18cc1", "content_id": "93d4c479ff7a3f231c9812aba4503d176a84e283", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "INI", "length_bytes": 132, "license_type": "permissive", "max_line_length": 38, "num_lines": 12, "path": "/tox.ini", "repo_name": "Kalebu/pycomm3", "src_encoding": "UTF-8", "text": "[tox]\nenvlist = py36, py37, py38, py39\n\n[testenv]\ndeps =\n    pytest\n\ncommands =\n    pytest\n\nsetenv =\n    PLCPATH=10.61.50.4/10" } ]
2
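The pycomm3 tests above take a `plc` argument, which pytest resolves through a fixture that is not part of these two files, and tox.ini passes the target controller through the `PLCPATH` environment variable. A plausible conftest.py sketch follows, assuming pycomm3's `LogixDriver`; the fixture name is fixed by the tests, but the scope and everything else here are guesses:

```python
# Hypothetical conftest.py for the tests above; not part of the repository.
import os

import pytest
from pycomm3 import LogixDriver


@pytest.fixture(scope='module')
def plc():
    # PLCPATH is set by tox.ini, e.g. "10.61.50.4/10" meaning IP/slot.
    path = os.environ['PLCPATH']
    with LogixDriver(path) as driver:
        yield driver
```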
vinay10949/cvdapp
https://github.com/vinay10949/cvdapp
3eb14b72bb5c5341ab3caaf46b926980b1a996b0
5c9e2de5ea651740a91a407a76c6a7b493241ac2
6696d599f0ea9e3858583ce60e5b41266255eb67
refs/heads/master
2022-07-16T08:53:12.031209
2020-05-16T11:54:44
2020-05-16T11:54:44
264,417,184
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7300000190734863, "alphanum_fraction": 0.7599999904632568, "avg_line_length": 13.142857551574707, "blob_id": "31b8bb12fa9fc61fe6a74cf9db7424934a84c149", "content_id": "40e26457cff6296ab1eb355250a29b6101a0b05e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 100, "license_type": "no_license", "max_line_length": 29, "num_lines": 7, "path": "/README.md", "repo_name": "vinay10949/cvdapp", "src_encoding": "UTF-8", "text": "# cvd-app\ncvd web app and simple report\n\n# Required Files\n1 setup.sh\n2 Procfile\n3 requirements.txt\n\n" }, { "alpha_fraction": 0.6896774768829346, "alphanum_fraction": 0.7070327401161194, "avg_line_length": 44.13450241088867, "blob_id": "e35af7d0cbb808feb379e15125378d6ea2599ddf", "content_id": "c99ad190d398c9437654ee0e27c4af0e04d4b0c5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7721, "license_type": "no_license", "max_line_length": 189, "num_lines": 171, "path": "/cvd.py", "repo_name": "vinay10949/cvdapp", "src_encoding": "UTF-8", "text": "#id;age;gender;height;weight;ap_hi;ap_lo;cholesterol;gluc;smoke;alco;active;cardio\n\n#['gender_1','alco','active','blood_pressure_level','ap_lo','weight','bmi','age_bucket','cholesterol','ap_hi']\nimport streamlit as st,shap\nimport pandas as pd\nimport matplotlib, matplotlib.pyplot as pl\nimport feature_engine\nimport numpy as np\nfrom PIL import Image\nimport pickle\n#loaded_model = pickle.load(open(\"xgboost_cv_best_pickle.dat\", \"rb\"))\n#st.write('Note that this model is previously fitted and loaded here, due to performance reasons')\nimport catboost\nfrom sklearn.externals import joblib\n\njoblib_file = \"model/joblib_catboost_Model.pkl\" \ncatboost_calibrated_model = joblib.load(joblib_file)\n\nohe_file = \"model/ohe.pkl\" \nohe_enc = joblib.load(ohe_file)\n\njamesStienenc_file = \"model/jamesStienenc.pkl\" \njamesStienenc_enc = joblib.load(jamesStienenc_file)\n\n\nageDiscretizer_file = \"model/ageDiscretizer.pkl\" \nageDiscretizer = joblib.load(ageDiscretizer_file)\ntest=pd.read_csv(\"test.csv\", index_col=None)\ntest.drop('Unnamed: 0',axis=1,inplace=True)\nexplainer = shap.TreeExplainer(catboost_calibrated_model.base_estimator)\ntestShapValues=explainer.shap_values(test)\t\t\n\n\n\ndef calculateBloodPressureLevel(data):\n if (data['ap_hi'] < 120) and (data['ap_lo'] < 80):\n return 'Normal'\n if (data['ap_hi'] >= 120 and data['ap_hi'] <=129) and (data['ap_lo'] < 80):\n return 'Elevated'\n if (data['ap_hi'] >= 130 and data['ap_hi'] <=139) | (data['ap_lo'] >= 80 and data['ap_lo'] <=89):\n return 'Stage1HyperTension'\n if (data['ap_hi'] >= 140) | (data['ap_lo'] >= 90):\n return 'Stage2HyperTension'\n if (data['ap_hi'] >= 180) | (data['ap_lo'] >= 120):\n return 'HypertensiveCrisis'\n\ndef BMI(data):\n return data['weight'] / (data['height']/100)**2\n \ndef main():\n\timg=Image.open(\"images/heart.png\")\n\tme=Image.open(\"images/vinay.jpg\")\n\tst.sidebar.image(me,width=200)\t\n\tst.sidebar.subheader(\"Name : Vinay Sawant\")\n\tst.sidebar.subheader(\"Email : [email protected]\")\n\tst.image(img,width=200,caption='Save Lives')\n\tst.subheader('Created by: Vinay Sawant')\n\n\tst.title(\"Cardio Vascular Detection\")\n\n\tst.header(\"Whats your gender ? \")\n\tgender=st.radio(\"Gender\",[1,0])\n\n\n\tst.header(\"Whats your height ? \")\n\theight=st.slider(\"Height(cms)\",152,192)\n\n\n\tst.header(\"Whats your Weight ? 
\")\n\tweight=st.slider(\"Weight in Kgs\",55,150)\n\n\n\tst.header(\"Whats your Age ? \")\n\tage=st.slider(\"Age in days\",10585,36500)\n\n\n\tst.header(\"What is your Systolic blood pressure? \")\n\tapHi=st.slider(\"Sbp\",100,170)\n\n\tst.header(\"What is your Diastolic blood pressure \")\n\tapLo=st.slider(\"Dbp\",60,100)\n\n\tst.header(\"Whats your cholesterol level ?\")\n\tcholesterol=st.radio(\"Cholesterol Level\",[\"Normal\",\"AboveNormal\",\"WellAboveNormal\"])\n\t\n\tst.header(\"Whats your glucose level ?\")\n\tgluc=st.radio(\"Glucose level\",[\"Normal\",\"AboveNormal\",\"WellAboveNormal\"])\n\n\tst.header(\"Do you smoke ?\")\n\tsmoke=st.radio(\" \",[\"Yes\",\"No\"])\n\n\tst.header(\"Do you consume alcohol ?\")\n\talco=st.radio(\" \",[\"Yes\",\"No\"])\n\n\tst.header(\"Are you physically active, do you work out ? \")\n\tactive=st.radio(\" \",[\"Yes\",\"No\"])\n\tb=st.button(\"Submit\", key=None)\n\tif b: \n\t\tage=age/365\n\t\t#id;age;gender;height;weight;ap_hi;ap_lo;cholesterol;gluc;smoke;alco;active\n\t\td={'age':age,'gender':gender,'weight':weight,'height':height,'ap_hi':apHi,'ap_lo':apLo,'cholesterol':cholesterol,'gluc':gluc,\n'smoke':smoke,'alco':alco,'active':active}\n\t\tdata=pd.DataFrame(d,index=[0])\n\t\tdata=ageDiscretizer.transform(data)\n\t\tdata['age_bucket']=data['age'].round(2)\n\t\tdata['bmi'] = data.apply(BMI, axis=1)\n\t\t# bucket boundaries\n\t\tbuckets = [0, 18.5, 24.9, 29.9, 1000]\n\t\t# bucket labels\n\t\tlabels = ['Underweight','Healthy','Overweight','Obese']\n\t\t# discretisation\n\t\tdata['bmi_category'] = pd.cut(data['bmi'], bins=buckets, labels=labels, include_lowest=True)\n\t\tdata['blood_pressure_level'] = data.apply(calculateBloodPressureLevel, axis=1)\n\t\tdata=ohe_enc.transform(data)\n\t\tdata=jamesStienenc_enc.transform(data)\n\t\tdata['smoke'] = data['smoke'].map({\"Yes\":1,\"No\":0})\n\t\tdata['alco'] = data['alco'].map({\"Yes\":1,\"No\":0})\n\t\tdata['active'] = data['active'].map({\"Yes\":1,\"No\":0})\n\t\tdata['cholesterol'] = data['cholesterol'].map({\"Normal\":1,\"AboveNormal\":2,\"WellAboveNormal\":3})\t\n\t\tpred=catboost_calibrated_model.predict(data[['gender_1','alco','active','blood_pressure_level','ap_lo','weight','bmi','age_bucket'\n,'cholesterol','ap_hi']])\n\t\tnewData=data[['gender_1','alco','active','blood_pressure_level','ap_lo','weight','bmi','age_bucket'\n,'cholesterol','ap_hi']]\t\t\n\t\tif pred[0]==1:\n\t\t\tst.warning(\"You have a high probability of cardio disease, go visit a doctor immediately\")\n\t\telse:\n\t\t\tst.success(\"You are safe, you don't have cardio disease\")\t\n\t\tst.title('Explaining the model')\n\t\tst.write('Below, all separate decision trees that have been built by training the model can be reviewed')\t\t\n\t\tst.write('To handle this inconsistency, SHAP values give robust details, among which is feature importance') \n\t\tst.write(catboost_calibrated_model.base_estimator.plot_tree(tree_idx=0))\n\t\tshap_values = explainer.shap_values(newData)\t\t\n\t\tpl.title('Assessing feature importance based on Shap 
values')\n\t\tshap.summary_plot(shap_values,newData,plot_type=\"bar\",show=False)\n\t\tst.pyplot(bbox_inches='tight')\n\t\tpl.clf()\n\t\tst.write('SHAP values can also be used to represent the distribution of the training set of the respectable SHAP value in relation with the Target value, in this case the Cardio Disease')\n\t\tpl.title('Total distribution of observations based on Shap values, colored by Target value')\n\t\tshap.summary_plot(shap_values,newData,show=False)\n\t\tst.pyplot(bbox_inches='tight')\n\t\tpl.clf()\n\t\tst.write('Which features caused this specific prediction? features in red increased the prediction, in blue decreased them')\n\t\tpred_prob=catboost_calibrated_model.predict_proba(newData)\n\t\texpectedValue=explainer.expected_value.round(4)\n\t\tsumShap=sum(shap_values[0]).round(3)\n\t\tst.write('The real probablitiy value for this individual record is: '+str(pred_prob))\n\t\tst.write('The predicted label is : '+[\"Yes\" if pred[0]==1 else \"No\"][0])\n\t\tst.write('This prediction is calculated as follows: '+'The average cardio disease probablity is : ('+str(expectedValue)+')'+' + the sum of the SHAP values. ')\n\t\tst.write( 'For this individual record the sum of the SHAP values is: '+str(sumShap))\n\t\tst.write( 'This yields to a predicted value of cardio:'+str(expectedValue)+' + '+str(sumShap)+'= '+str(expectedValue+sumShap))\n\t\tst.write('Which features caused this specific prediction? features in red increased the prediction, in blue decreased them')\n\t\tshap.force_plot(explainer.expected_value, shap_values[0],newData,matplotlib=True,show=False,figsize=(16,5))\n\t\tst.pyplot(bbox_inches='tight',dpi=300,pad_inches=0)\n\t\tpl.clf()\n\t\tst.write('In the plot above, the feature values are shown. The SHAP values are represented by the length of the specific bar.'\n'However, it is not quite clear what each single SHAP value is exactly, this can be seen below, if wanted.')\n\t\tst.title('Developing a deeper understanding of the data using SHAP: Interaction effects')\n\t\tshap.dependence_plot('ap_hi', testShapValues, test, interaction_index=\"age_bucket\")\n\t\tst.pyplot()\n\t\tpl.clf()\n\t\tshap.dependence_plot('bmi', testShapValues, test, interaction_index=\"ap_hi\")\n\t\tst.pyplot()\n\t\tpl.clf()\n\t\tst.write('Conclusion: There is interaction between bmi and ap_hi')\n \nif __name__== '__main__':\n main()\n\n\t\n" }, { "alpha_fraction": 0.4943181872367859, "alphanum_fraction": 0.59375, "avg_line_length": 15.7619047164917, "blob_id": "1ca8cf1b1dd5ac115bd38957bb5979461f409941", "content_id": "9d93b8d6f5c182aee48f36df7896ef848682c515", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "TOML", "length_bytes": 352, "license_type": "no_license", "max_line_length": 31, "num_lines": 21, "path": "/Pipfile", "repo_name": "vinay10949/cvdapp", "src_encoding": "UTF-8", "text": "[[source]]\nname = \"pypi\"\nurl = \"https://pypi.org/simple\"\nverify_ssl = true\n\n[dev-packages]\n\n[packages]\nfeature-engine = \"==0.4.3\"\nmatplotlib = \"==3.1.0\"\nnumpy = \"==1.18.4\"\npandas = \"==1.0.3\"\ncatboost = \"==0.23.1\"\nstreamlit = \"==0.49.0\"\nshap = \"==0.35.0\"\nscikit-learn = \"==0.22.2.post1\"\ncategory_encoders = \"==2.2.2\"\n\n\n[requires]\npython_version = \"3.7\"\n" } ]
3
damithsenanayake/GNG
https://github.com/damithsenanayake/GNG
3de88dd4d1900f79b843093de56deb2a0b984ea9
e7668ca786d5ef7daf5352174be65234a37d506e
58d6e615df104ed9ddcfc6827ee03fc95f93cdde
refs/heads/master
2021-04-27T12:09:14.542738
2018-04-05T00:56:26
2018-04-05T00:56:26
122,574,141
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.4483478367328644, "alphanum_fraction": 0.4841739237308502, "avg_line_length": 26.132076263427734, "blob_id": "cf2cc5feca43b56feccaa2e339c7771e013e99db", "content_id": "19fffc7952605215e47d310cac7f890ffba90762", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2875, "license_type": "no_license", "max_line_length": 105, "num_lines": 106, "path": "/GNG3.py", "repo_name": "damithsenanayake/GNG", "src_encoding": "UTF-8", "text": "import numpy as np\nfrom sklearn.datasets import make_blobs, make_circles, make_s_curve, make_moons, make_swiss_roll\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\n\ng_size = 2\n\nG = np.zeros(shape=(g_size, g_size)) # np.identity(g_size)\n\nages = np.zeros(shape=G.shape)\n\nerrors = np.zeros(g_size)\n\nW = np.random.random(size=(g_size, 3))\n\nX, c = make_swiss_roll(n_samples=500)#make_blobs(n_samples=1000, centers=5, n_features=2, random_state=1,\n #cluster_std=0.5) # make_moons(n_samples=500)#\n## Train Neural Gas\n\nX -= X.min()\nX /= X.max()\n\nalpha = 0.1\na_max = 250\n\nfor i in range(1000):\n\n for x in X:\n dists = np.linalg.norm(W - x, axis=1)\n s, t = np.argsort(dists)[:2]\n\n try:\n errors[s] += dists[s] ** 2\n except:\n np.append(errors, dists[s] ** 2)\n\n neis = np.where(G[s])[0]\n\n d = dists[neis]\n d /= (d.sum() + 0.00001)\n W[neis] += 0.01 * (x - W[neis]) # *np.array([np.exp(-0.5*d**2)]).T\n\n ages[s] += G[s]\n\n ages[s][s] = 0\n\n G[s][ages[s] > a_max] = 0\n ages[s][ages[s] > a_max] = 0\n\n if G[s][t]:\n ages[s, t] = 0\n else:\n G[s][t] = 1\n ages[s][t] = 0\n\n if i % 50 == 48 and i < 9000:\n if errors[s] > 1:\n try:\n l = np.argmax(errors[G[s] == 1])\n except IndexError:\n print errors\n W_n = W[l] + W[s]\n W_n *= 0.5\n errors[l] *= alpha\n errors[s] *= alpha\n W = np.concatenate((W, np.array([W_n])), axis=0)\n\n G = np.concatenate((G, np.array([np.zeros(G.shape[0])])), axis=0)\n G = np.concatenate((G, np.array([np.zeros(G.shape[0])]).T), axis=1)\n ages = np.concatenate((ages, np.array([np.zeros(ages.shape[0])])), axis=0)\n ages = np.concatenate((ages, np.array([np.zeros(ages.shape[0])]).T), axis=1)\n errors = np.append(errors, 0)\n\n GT = G\n agesT = ages\n errorsT = errors\n WT = W\n for s in range(G.shape[0]):\n if G[s].sum() == 0:\n WT = np.delete(W, s, 0)\n GT = np.delete(G, s, 0)\n GT = np.delete(GT, s, 1)\n agesT = np.delete(ages, s, 0)\n agesT = np.delete(agesT, s, 1)\n errorsT = np.delete(errors, s)\n\n G = GT\n ages = agesT\n errors = errorsT\n W = WT\n\n# print ages\n\n#### Visualize Neural Gas\n\nfig = plt.figure()\nax = fig.add_subplot(111, projection='3d')\nax.scatter(X.T[0], X.T[1],X.T[2], c=c, cmap=plt.cm.Set1, alpha=0.2)\nax.scatter(W.T[0], W.T[1],W.T[2], c='black', alpha=0.2)\n\nfor i in range(G.shape[0]):\n for j in range(G.shape[1]):\n if G[i, j]:\n ax.plot([W[i, 0], W[j, 0]], [W[i, 1], W[j, 1]],[W[i,2], W[j,2]], c='black')\n\nplt.show()" }, { "alpha_fraction": 0.3668639063835144, "alphanum_fraction": 0.39266273379325867, "avg_line_length": 35.73478317260742, "blob_id": "10d4b61bce030153f62b6fdfa78263eb03d2c9b8", "content_id": "5df36424c3d9020c501ea0e3489152284846206e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8450, "license_type": "no_license", "max_line_length": 144, "num_lines": 230, "path": "/GASMAP.py", "repo_name": "damithsenanayake/GNG", "src_encoding": "UTF-8", "text": "import numpy as np\nfrom sklearn.datasets import make_swiss_roll, 
make_s_curve,make_blobs,load_digits, fetch_olivetti_faces\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\n\nclass GASMAP(object):\n\n def t_dist(self, d, n=1):\n dists= np.power((1+d**2), -n)\n return dists/dists.sum()\n#\n# X, colors = make_swiss_roll(n_samples=15000,random_state=10)#oad_digits()#make_blobs(n_samples=1000, n_features=3, centers=4, random_state=5)#\n#\n# data =load_digits()\n#\n# X = data.data\n# X-=X.min()\n# denom = X.max()\n# # X/=denom\n# colors = data.target\n\n\n def fit_transform(self, X, colors):\n intdim = 3\n\n g_max = 10000\n\n W = np.random.random((intdim, X.shape[1]))\n Y = np.random.random((intdim, 2))\n\n G = np.zeros((W.shape[0], W.shape[0]))\n errors = np.zeros(intdim)\n gens = np.zeros(intdim)\n a_max_st = 3\n ages = np.zeros(G.shape)\n\n hits = np.zeros(G.shape[0])\n\n lrst = 0.5\n alpha = 0.01\n maxiter = 200\n D = X.shape[1]\n\n QE = []\n NG = []\n GTs = []\n print 'Graph Dimensionality : ', intdim\n for i in range(maxiter):\n a_max = 2 # -(i%2==0)# (a_max_st*(1 - i * 1. / maxiter))\n\n sf = 0.1\n GT = -np.log(sf) * X.shape[1] * np.exp(-7.5 * (1. * i) ** 6 / maxiter ** 6)\n GTs.append(GT)\n QE.append(errors.sum())\n NG.append(G.shape[0])\n struct_change = (i % 15 == 0)\n errors.fill(0)\n for x in X:\n print '\\r iteration : ', i, ' : n(G) : ', G.shape[0],\n dists = np.linalg.norm(W - x, axis=1)\n candidates = np.argsort(dists)[:intdim]\n\n #\n # if i%4 == 0:\n # t, u = np.argsort(np.linalg.norm(W-W[s], axis=1))[1:3]\n s = candidates[0]\n lr = np.exp(0.5 * i ** 2 * 1. / maxiter ** 2) * lrst\n try:\n errors[s] += dists[s] ** 2\n except:\n np.append(errors, dists[s] ** 2)\n\n neis = np.where(G[s])[0]\n D = dists[neis]\n d = np.linalg.norm(Y[neis] - Y[s], axis=1)\n if d.shape[0] and d.max():\n d /= d.max()\n\n lamb = np.nan_to_num(np.array([np.exp(-.5 * d ** 2)]).T)\n if lamb.sum():\n lamb /= lamb.sum()\n lamb = np.nan_to_num(lamb)\n print ' moving node',\n W[s] += lr * (x - W[s])\n W[neis] += lr * lamb * (W[s] - W[neis]) # - lamb * 0.01* W[neis]\n hits[s] += 1\n\n # Move Y\n\n ages[s] += G[s]\n ages[s][s] = 0\n # if not( i%4 == 0) :\n\n G[s][ages[s] >= a_max] = 0\n G[:, s][ages[s] >= a_max] = 0\n ages[s][ages[s] >= a_max] = 0\n ages[:, s][ages[s] >= a_max] = 0\n\n for t in candidates[1:]:\n G[s][t] = 1\n G[t][s] = 1\n ages[s][t] = 0\n ages[t][s] = 0\n\n for u in candidates[1:]:\n if not (u == t):\n G[u][t] = 1\n G[t][u] = 1\n\n ages[u][t] = 0\n ages[t][u] = 0\n\n if struct_change:\n print ' creating new node, ',\n while errors.max() > GT ** 2:\n grix = np.where(errors > GT ** 2)[0]\n for s in range(G.shape[0]):\n if errors[s] > GT ** 2: # and gens[s] < g_max:\n try:\n ninds = np.where(G[s] == 1)[0]\n h_er_ixs = ninds[np.argsort(errors[ninds])][:intdim]\n except ValueError:\n continue\n W_n = np.sum(W[h_er_ixs], axis=0) + W[s]\n W_n /= intdim * 1.\n\n W_n = 2 * W[s] - W_n\n\n Y_n = Y[h_er_ixs].sum(axis=0) + Y[s]\n Y_n /= intdim * 1.\n\n Y_n = 2 * Y[s] - Y_n\n\n errors[h_er_ixs] *= alpha\n errors[s] *= alpha\n gens[s] += 1\n gens[h_er_ixs] += 1\n W = np.concatenate((W, np.array([W_n])), axis=0)\n Y = np.concatenate((Y, np.array([Y_n])), axis=0)\n G = np.concatenate((G, np.array([np.zeros(G.shape[0])])), axis=0)\n G = np.concatenate((G, np.array([np.zeros(G.shape[0])]).T), axis=1)\n G[s][-1] = 1\n G[-1][s] = 1\n G[h_er_ixs][:, -1] = 1\n G[:, -1][h_er_ixs] = 1\n ages = np.concatenate((ages, np.array([np.zeros(ages.shape[0])])), axis=0)\n ages = np.concatenate((ages, np.array([np.zeros(ages.shape[0])]).T), axis=1)\n\n errors = 
np.append(errors, errors[s])\n hits = np.append(hits, 0)\n gens = np.append(gens, 0)\n\n # move y\n move_range = 10\n if struct_change: move_range = 50 * (i ** 2 / maxiter ** 2)\n if i + 1 >= maxiter: move_range = 200\n print ' moving all nodes in graph, ',\n for _ in range(move_range):\n for p in range(Y.shape[0]):\n # p = s\n d = np.linalg.norm(Y - Y[p], axis=1)\n\n y_neis = np.where(G[p] == 1)[0]\n\n oths = np.where(G[p] == 0)[0] # np.array(range(G.shape[0]))#\n\n d_oths = d[oths] # np.linalg.norm(Y[oths] - Y[p], axis=1)\n\n # pushdirs = np.array([np.exp(-d_oths)]).T * 250\n pushdirs = np.array([self.t_dist(d_oths)]).T # * 5\n # pushdirs /= pushdirs.min()\n pushdirs /= pushdirs.sum()\n\n push = (Y[oths] - Y[p]) * pushdirs\n\n Y[oths] += push # * lr\n\n pulldirs = np.array([(d[y_neis])]).T # **2\n if pulldirs.sum():\n pulldirs /= pulldirs.sum()\n\n Y[p] += 0.4 * ((Y[y_neis] - Y[p]) * pulldirs).sum(axis=0) # *lr\n\n if struct_change or i == maxiter - 1:\n emptyNodes = np.where((G.sum(axis=0) <= intdim - 2)) # | ( hits<=1))\n while emptyNodes[0].shape[0]:\n W = np.delete(W, emptyNodes, axis=0)\n Y = np.delete(Y, emptyNodes, axis=0)\n G = np.delete(G, emptyNodes, axis=0)\n G = np.delete(G, emptyNodes, axis=1)\n ages = np.delete(ages, emptyNodes, axis=0)\n ages = np.delete(ages, emptyNodes, axis=1)\n errors = np.delete(errors, emptyNodes)\n hits = np.delete(hits, emptyNodes)\n hits.fill(0)\n emptyNodes = np.where((G.sum(axis=0) <= intdim - 2)) # | ( hits<=1))\n\n fig1 = plt.figure()\n for i in range(G.shape[0]):\n for j in range(G.shape[1]):\n if G[i, j]:\n plt.plot([Y[i, 0], Y[j, 0]], [Y[i, 1], Y[j, 1]], c='black')\n plt.show(block=False)\n # #\n fig2 = plt.figure()\n predictions = []\n\n for x in X:\n predictions.append(Y[np.argmin(np.linalg.norm(W - x, axis=1))])\n\n disp = np.array(predictions)\n\n plt.scatter(disp.T[0], disp.T[1], c=colors, cmap=plt.cm.hsv, alpha=0.2, s=16)\n plt.show()\n return disp\n# print ages.max()\n# fig = plt.figure()\n# ax = fig.add_subplot(111, projection='3d')\n# ax.scatter(X.T[0], X.T[1],X.T[2], c=colors, cmap=plt.cm.hsv, alpha=0.2)\n# ax.scatter(W.T[0], W.T[1],W.T[2], c='black', alpha=0.8)\n#\n\n# for i in range(G.shape[0]):\n# for j in range(G.shape[1]):\n# if G[i, j]:\n# ax.plot([W[i, 0], W[j, 0]], [W[i, 1], W[j, 1]],[W[i,2], W[j,2]], c='black')\n#\n# plt.show()\n# # #\n# plt.scatter(Y.T[0], Y.T[1], c='black', alpha = 0.4, s=4)\n\n" }, { "alpha_fraction": 0.4273751974105835, "alphanum_fraction": 0.4613526463508606, "avg_line_length": 26.97747802734375, "blob_id": "bf7861ac512488bb0a27190f4bc0938cbb3f93b3", "content_id": "9fa79a6635f547642fe1c2e5e148864b30b733f4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6210, "license_type": "no_license", "max_line_length": 143, "num_lines": 222, "path": "/GSOMGAS-WD.py", "repo_name": "damithsenanayake/GNG", "src_encoding": "UTF-8", "text": "import numpy as np\nfrom sklearn.datasets import make_swiss_roll, make_s_curve,make_blobs,load_digits\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\n\n#\n# X, colors = make_swiss_roll(n_samples=1000,random_state=10)#oad_digits()#make_blobs(n_samples=1000, n_features=3, centers=4, random_state=5)#\n#\ndata = load_digits()\n#\nX = data.data\ndenom = X.max(axis=0)\ndenom[denom==0]=1\n# X/=denom\ncolors = data.target\n\n\nW = np.random.random((3, X.shape[1]))\nY = np.random.random((3, 2))\n\nG = np.zeros((W.shape[0], W.shape[0]))\nerrors = np.zeros(3)\na_max =1\nages = np.zeros(G.shape)\n\nhits = 
np.zeros(G.shape[0])\n\nlrst = 0.1\nalpha = 0.6\nmaxiter = 50\nD = X.shape[1]\nsf = 0.1\nGT = -np.log(sf) * D\n\nfor i in range(maxiter):\n\n for x in X:\n print '\\r iteration : ', i, ' : n(G) : ', G.shape[0],\n dists = np.linalg.norm(W - x, axis=1)\n s, t, u = np.argsort(dists)[:3]\n #\n # if i%4 == 0:\n # t, u = np.argsort(np.linalg.norm(W-W[s], axis=1))[1:3]\n\n lr = np.exp(0.5*i**2*1./maxiter**2) * lrst\n try:\n errors[s] += dists[s] ** 2\n except:\n np.append(errors, dists[s] ** 2)\n\n neis = np.where(G[s])[0]\n D = dists[neis]\n d = np.linalg.norm(Y[neis]-Y[s], axis=1)\n if d.shape[0] and d.max():\n d/= d.max()\n\n lamb = np.nan_to_num(np.array([np.exp(-.5*d**2)]).T)\n if lamb.sum() :\n lamb /= lamb.sum()\n lamb = np.nan_to_num(lamb)\n W[neis] += lr * lamb * (x - W[neis])#- lamb * 0.01* W[neis]\n W[s] += lr * (x - W[s])\n hits[s]+=1\n\n # Move Y\n\n\n ages[s] += G[s]\n ages[s][s] = 0\n # if not( i%4 == 0) :\n\n G[s][ages[s] > a_max] = 0\n G[:, s][ages[s] > a_max] =0\n ages[s][ages[s] > a_max] = 0\n ages[:, s][ages[s] > a_max] =0\n\n # del_edges = np.where((D-D.mean())/D.std() >2.5)[0]\n #\n # G[s][del_edges] = 0\n # G[:, s][del_edges] = 0\n # ages[s][del_edges] =0\n # ages[:, s][del_edges]=0\n\n G[s][t] = 1\n G[t][s] = 1\n ages[s][t] = 0\n ages[t][s] =0\n\n G[s][u]=1\n G[u][s]=1\n ages[s][u]=0\n ages[u][s]=0\n\n # if not G[u][t]:\n G[u][t]=1\n G[t][u]=1\n\n ages[u][t]=0\n ages[t][u]=0\n #\n # if i% 100 == 51 :\n # neidists = np.linalg.norm(W[s]-W[neis], axis=1)\n # del_node = np.argmax(neidists)\n #\n # G[s][neis[del_node]]=0\n # G[:, s][neis[del_node]]=0\n\n\n if i % 8 == 1 and i < 9000:\n if errors[s] > GT**2:\n try:\n ninds = np.where(G[s]==1)[0]\n l, m = np.argsort(errors[ninds])[:3]\n except ValueError:\n continue\n W_n = W[ninds[l]] + W[s] + W[ninds[m]]\n W_n /= 3.\n\n W_n = 2* W[s] - W_n\n\n Y_n = Y[ninds[l]] + Y[s] + Y[ninds[m]]\n Y_n /= 3.\n\n Y_n = 2*Y[s] - Y_n\n\n errors[l] *= alpha\n errors[s] *= alpha\n W = np.concatenate((W, np.array([W_n])), axis=0)\n Y = np.concatenate((Y, np.array([Y_n])), axis=0)\n G = np.concatenate((G, np.array([np.zeros(G.shape[0])])), axis=0)\n G = np.concatenate((G, np.array([np.zeros(G.shape[0])]).T), axis=1)\n G[s][-1]=1\n G[-1][s]=1\n ages = np.concatenate((ages, np.array([np.zeros(ages.shape[0])])), axis=0)\n ages = np.concatenate((ages, np.array([np.zeros(ages.shape[0])]).T), axis=1)\n errors = np.append(errors, 0)\n hits = np.append(hits, 0)\n\n #move y\n move_range = 1\n if i % 10 == 8: move_range = 50\n for _ in range(move_range):\n for p in range(Y.shape[0]):\n y_neis = np.where(G[p] == 1)[0]\n\n oths = np.where(G[p] == 0)[0] # np.array(range(G.shape[0]))#\n\n d_oths = np.linalg.norm(Y[oths] - Y[p], axis=1)\n\n pushdirs = np.array([np.exp(-d_oths)]).T * 500\n # pushdirs /= pushdirs.min()\n\n push = (Y[oths] - Y[p]) * pushdirs\n\n Y[oths] += push\n\n Y[p] += 0.1 * (Y[y_neis] - Y[p]).sum(axis=0)\n\n\n emptyNodes = np.where((G.sum(axis=0) <= 1))# | ( hits==0))\n\n W = np.delete(W, emptyNodes, axis=0)\n Y = np.delete(Y, emptyNodes, axis=0)\n G = np.delete(G, emptyNodes, axis=0)\n G = np.delete(G, emptyNodes, axis=1)\n ages = np.delete(ages,emptyNodes,axis=0)\n ages = np.delete(ages,emptyNodes,axis=1)\n errors = np.delete(errors, emptyNodes)\n hits = np.delete(hits, emptyNodes)\n\n# Map Correction\nprint '\\n'\nfor _ in range(1000):\n for p in range(Y.shape[0]):\n print '\\rcorrecting:, ',_,\n y_neis = np.where(G[p] == 1)[0]\n\n oths = np.array(range(G.shape[0]))#np.where(G[p] == 0)[0]\n\n d_oths = np.linalg.norm(Y[oths] - Y[p], axis=1)\n 
# d_oths = d_oths/d_oths.sum()\n pushdirs = np.array([np.exp(-d_oths)]).T * 50\n\n\n push = (Y[oths] - Y[p])* pushdirs\n\n Y[oths] += push\n\n\n Y[p] += 0.01*(Y[y_neis]-Y[p]).sum(axis=0)\n# print ages.max()\n# fig = plt.figure()\n# ax = fig.add_subplot(111, projection='3d')\n# ax.scatter(X.T[0], X.T[1],X.T[2], c=colors, cmap=plt.cm.hsv, alpha=0.2)\n# ax.scatter(W.T[0], W.T[1],W.T[2], c='black', alpha=0.8)\n#\n\n# for i in range(G.shape[0]):\n# for j in range(G.shape[1]):\n# if G[i, j]:\n# ax.plot([W[i, 0], W[j, 0]], [W[i, 1], W[j, 1]],[W[i,2], W[j,2]], c='black')\n#\n# plt.show()\n# # #\n# plt.scatter(Y.T[0], Y.T[1], c='black', alpha = 0.4, s=4)\nfig1 = plt.figure()\nfor i in range(G.shape[0]):\n for j in range(G.shape[1]):\n if G[i, j]:\n plt.plot([Y[i, 0], Y[j, 0]], [Y[i, 1], Y[j, 1]], c='black')\nplt.show(block=False)\n# #\nfig2 = plt.figure()\npredictions =[]\n\nfor x in X:\n predictions.append(Y[np.argmin(np.linalg.norm(W-x, axis=1))])\n\ndisp = np.array(predictions)\n\nplt.scatter(disp.T[0], disp.T[1], c=colors, cmap=plt.cm.hsv, alpha = 0.2, s=16)\nplt.show()" }, { "alpha_fraction": 0.4342981278896332, "alphanum_fraction": 0.4637858271598816, "avg_line_length": 29.662500381469727, "blob_id": "10c1b095289d00c32af3d7520bc7867e8ea9d7e7", "content_id": "9ccfcbbb28b210967c402b04e8bced618db0ec65", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7359, "license_type": "no_license", "max_line_length": 143, "num_lines": 240, "path": "/3GSOM.py", "repo_name": "damithsenanayake/GNG", "src_encoding": "UTF-8", "text": "import numpy as np\nfrom sklearn.datasets import make_swiss_roll, make_s_curve, make_blobs, load_digits, fetch_olivetti_faces\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\n\n\n# plt.style.use('dark_background')\ndef t_dist(d, n=1.5):\n dists = np.power((1 + d ** 2), -n)\n return dists / dists.sum()\n\n\n#\n# X, colors = make_swiss_roll(n_samples=1000,random_state=10)#oad_digits()#make_blobs(n_samples=1000, n_features=3, centers=4, random_state=5)#\n#\ndata = load_digits()\n\nX = data.data\ncolors = data.target\n\nintdim = 3\n\ng_max = 10000\n\nW = np.random.random((intdim, X.shape[1]))\nY = np.random.random((intdim, 2))\n\nG = np.zeros((W.shape[0], W.shape[0]))\nerrors = np.zeros(intdim)\ngens = np.zeros(intdim)\na_max_st = 3\nages = np.zeros(G.shape)\n\nhits = np.zeros(G.shape[0])\n\nlrst = 0.9\nalpha = 0.01\nmaxiter = 500\nD = X.shape[1]\n\nQE = []\nNG = []\nGTs = []\nprint 'Graph Dimensionality : ', intdim\nfor i in range(maxiter):\n a_max = 2 # -(i%2==0)# (a_max_st*(1 - i * 1. / maxiter))\n wd = 0.0025\n sf = 0.2\n GT = -np.log(sf) * X.shape[1] * np.exp(-7.5 * (1. * i) **6 / maxiter ** 6)\n GTs.append(GT)\n QE.append(errors.sum())\n NG.append(G.shape[0])\n struct_change = (i % 35 == 0)\n errors.fill(0)\n for x in X:\n print '\\r iteration : ', i, ' : n(G) : ', G.shape[0],\n dists = np.linalg.norm(W - x, axis=1)\n candidates = np.argsort(dists)[:intdim]\n\n #\n # if i%4 == 0:\n # t, u = np.argsort(np.linalg.norm(W-W[s], axis=1))[1:3]\n s = candidates[0]\n lr = np.exp(0.5 * i ** 2 * 1. 
/ maxiter ** 2) * lrst\n try:\n errors[s] += dists[s] ** 2\n except:\n np.append(errors, dists[s] ** 2)\n\n neis = np.where(G[s])[0]\n D = dists[neis]\n d = np.linalg.norm(Y[neis] - Y[s], axis=1)\n if d.shape[0] and d.max():\n d /= d.max()\n\n lamb = np.nan_to_num(np.array([np.exp(-.5 * d ** 2)]).T)\n if lamb.sum():\n lamb /= lamb.sum()\n lamb = np.nan_to_num(lamb)\n print ' moving node',\n W[s] += lr * (x - W[s])\n W[neis] += lr * lamb * (x - W[neis]) - wd*W[neis]*(1-np.exp(-2.5*i/maxiter))\n hits[s] += 1\n\n # Move Y\n\n ages[s] += G[s]\n ages[s][s] = 0\n # if not( i%4 == 0) :\n\n G[s][ages[s] >= a_max] = 0\n G[:, s][ages[s] >= a_max] = 0\n ages[s][ages[s] >= a_max] = 0\n ages[:, s][ages[s] >= a_max] = 0\n\n for t in candidates[1:]:\n G[s][t] = 1\n G[t][s] = 1\n ages[s][t] = 0\n ages[t][s] = 0\n\n for u in candidates[1:]:\n if not (u == t):\n G[u][t] = 1\n G[t][u] = 1\n\n ages[u][t] = 0\n ages[t][u] = 0\n\n if struct_change:\n print ' creating new node, ',\n while errors.max() > GT **2 :\n grix = np.where(errors > GT ** 2)[0]\n for s in range(G.shape[0]):\n if errors[s] > GT ** 2: # and gens[s] < g_max:\n try:\n ninds = np.where(G[s] == 1)[0]\n h_er_ixs = ninds[np.argsort(errors[ninds])][:intdim]\n except ValueError:\n continue\n W_n = np.sum(W[h_er_ixs], axis=0) + W[s]\n W_n /= intdim * 1.\n\n W_n = 2 * W[s] - W_n\n\n Y_n = Y[h_er_ixs].sum(axis=0) + Y[s]\n Y_n /= intdim * 1.\n\n Y_n = 2 * Y[s] - Y_n\n\n errors[h_er_ixs] *= alpha\n errors[s] *= alpha\n gens[s] += 1\n gens[h_er_ixs] += 1\n W = np.concatenate((W, np.array([W_n])), axis=0)\n Y = np.concatenate((Y, np.array([Y_n])), axis=0)\n G = np.concatenate((G, np.array([np.zeros(G.shape[0])])), axis=0)\n G = np.concatenate((G, np.array([np.zeros(G.shape[0])]).T), axis=1)\n G[s][-1] = 1\n G[-1][s] = 1\n G[h_er_ixs][:, -1] = 1\n G[:, -1][h_er_ixs] = 1\n ages = np.concatenate((ages, np.array([np.zeros(ages.shape[0])])), axis=0)\n ages = np.concatenate((ages, np.array([np.zeros(ages.shape[0])]).T), axis=1)\n\n errors = np.append(errors, errors[s])\n hits = np.append(hits, 0)\n gens = np.append(gens, 0)\n\n # move y\n move_range = 10\n if struct_change: move_range = 50*(i**2/maxiter**2)\n if i +1 >= maxiter: move_range = 200\n print ' moving all nodes in graph, ',\n for _ in range(move_range):\n for p in range(Y.shape[0]):\n # p = s\n d = np.linalg.norm(Y - Y[p], axis=1)\n\n y_neis = np.where(G[p] == 1)[0]\n\n oths = np.where(G[p] == 0)[0] # np.array(range(G.shape[0]))#\n\n d_oths = d[oths] # np.linalg.norm(Y[oths] - Y[p], axis=1)\n\n # pushdirs = np.array([np.exp(-d_oths)]).T * 250\n pushdirs = np.array([t_dist(d_oths)]).T # * 5\n # pushdirs /= pushdirs.min()\n pushdirs /= pushdirs.sum()\n\n push = (Y[oths] - Y[p]) * pushdirs\n\n Y[oths] += push # * lr\n\n pulldirs = np.array([(d[y_neis])]).T#**2\n if pulldirs.sum():\n pulldirs /= pulldirs.sum()\n\n Y[p] += 0.4 * ((Y[y_neis] - Y[p]) * pulldirs).sum(axis=0) # *lr\n\n if struct_change or i == maxiter - 1:\n emptyNodes = np.where((G.sum(axis=0) <= intdim - 2)) # | ( hits<=1))\n while emptyNodes[0].shape[0]:\n W = np.delete(W, emptyNodes, axis=0)\n Y = np.delete(Y, emptyNodes, axis=0)\n G = np.delete(G, emptyNodes, axis=0)\n G = np.delete(G, emptyNodes, axis=1)\n ages = np.delete(ages, emptyNodes, axis=0)\n ages = np.delete(ages, emptyNodes, axis=1)\n errors = np.delete(errors, emptyNodes)\n hits = np.delete(hits, emptyNodes)\n hits.fill(0)\n emptyNodes = np.where((G.sum(axis=0) <= intdim - 2)) # | ( hits<=1))\n\n# smoothiter = 50\n# for i in range(smoothiter):\n# for x in X:\n# b = 
np.argmin(np.linalg.norm(x - W, axis=1))\n#\n# neis = np.where(G[b])[0]\n#\n# dists = np.linalg.norm(Y[b] - Y[neis], axis=1)\n# if dists.max():\n# dists/=dists.max()\n# lamb = np.exp(- dists **2)\n# W[b] += (x -W[b]) * 0.1\n# W[neis] += (x - W[neis]) * 0.1 * np.array([lamb]).T\n\n # Y[neis] += (Y[b]- Y[neis])*0.01\n\n# # #\nfig1 = plt.figure()\n\nplt.scatter(Y.T[0], Y.T[1], c='black', alpha = 0.4, s=4)\nfor i in range(G.shape[0]):\n for j in range(G.shape[1]):\n if G[i, j]:\n plt.plot([Y[i, 0], Y[j, 0]], [Y[i, 1], Y[j, 1]], c='grey')\nplt.show(block=False)\n# #\nfig2 = plt.figure()\npredictions = []\n\nfor x in X:\n predictions.append(Y[np.argmin(np.linalg.norm(W - x, axis=1))])\n\ndisp = np.array(predictions)\n\nplt.scatter(disp.T[0], disp.T[1], c=colors, cmap=plt.cm.hsv, alpha=0.2, s=16, edgecolors=None)\nplt.show(block=False)\nfig3 = plt.figure()\nt = range(len(QE))\nplt.plot(t, QE)\nplt.show()\nfig4 = plt.figure()\nplt.plot(t, GTs)\nplt.show(block=False)\nfig5 = plt.figure()\nplt.plot(t, NG)\nplt.show(block=False)\n" }, { "alpha_fraction": 0.4423448443412781, "alphanum_fraction": 0.4775606691837311, "avg_line_length": 25.91907501220703, "blob_id": "f1636e3e0796b5bdbfde83dcf16c03ab07eacbfe", "content_id": "2a880b660f115453f7fa24c0a89cb232ea42a53e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4657, "license_type": "no_license", "max_line_length": 119, "num_lines": 173, "path": "/TGNGDS.py", "repo_name": "damithsenanayake/GNG", "src_encoding": "UTF-8", "text": "import numpy as np\nfrom sklearn.datasets import make_blobs, make_circles, make_s_curve, make_moons, make_swiss_roll\nimport matplotlib.pyplot as plt\nfrom sklearn.manifold import MDS\nfrom mpl_toolkits.mplot3d import Axes3D\nimport pickle\n\ng_size = 3\n\nG = np.zeros(shape=(g_size, g_size)) # np.identity(g_size)\n\nages = np.zeros(shape=G.shape)\n\nerrors = np.zeros(g_size)\n\nW = np.random.random(size=(g_size, 3))\n\nY = np.random.random(size=(g_size,2))\n\nX, c = make_s_curve(n_samples=500, random_state=10)#make_blobs(n_samples=1000, centers=5, n_features=2, random_state=1,\n #cluster_std=0.5) # make_moons(n_samples=500)#\n## Train Neural Gas\n\nX -= X.min()\nX /= X.max()\nmds = MDS()\nalpha = 0.9\na_max = 50\n\nfor i in range(1000):\n\n for x in X:\n dists = np.linalg.norm(W - x, axis=1)\n s, t, u = np.argsort(dists)[:3]\n\n try:\n errors[s] += dists[s] ** 2\n except:\n np.append(errors, dists[s] ** 2)\n\n neis = np.where(G[s])[0]\n d = dists[neis]\n d /= (d.sum() + 0.00001)\n W[neis] += 0.001 * (x - W[neis]) # *np.array([np.exp(-0.5*d**2)]).T\n W[s] += 0.001 * (x - W[s])\n\n ages[s] += G[s]\n ages[s][s] = 0\n\n G[s][ages[s] > a_max] = 0\n G[:, s][ages[s] > a_max] =0\n ages[s][ages[s] > a_max] = 0\n ages[:, s][ages[s] > a_max] =0\n if G[s][t]:\n ages[s, t] = 0\n ages[t, s] = 0\n else:\n G[s][t] = 1\n G[t][s] = 1\n ages[s][t] = 0\n ages[t][s] =0\n\n if not G[s][u]:\n G[s][u]=1\n G[u][s]=1\n ages[s][u]=0\n ages[u][s]=0\n\n if not G[u][t]:\n G[u][t]=1\n G[t][u]=1\n\n ages[u][t]=0\n ages[t][u]=0\n\n\n if True:\n if errors[s] > 0.01:\n try:\n ninds = np.where(G[s]==1)[0]\n l, m = np.argsort(errors[ninds])[:3]\n except ValueError:\n continue\n cent = W[ninds[l]] + W[s] + W[ninds[m]]\n cent /= 3.\n\n W_n = 2*W[s] - cent\n\n # create new Y node :\n\n y_cen = Y[ninds[l]] + Y[s] + Y[ninds[m]]\n y_cen /= 3.\n\n Y_n = 2*Y[s] - y_cen\n\n errors[l] *= alpha\n errors[s] *= alpha\n errors[m] *= alpha\n W = np.concatenate((W, np.array([W_n])), axis=0)\n Y = np.concatenate((Y, 
np.array([Y_n])), axis=0)\n G = np.concatenate((G, np.array([np.zeros(G.shape[0])])), axis=0)\n G = np.concatenate((G, np.array([np.zeros(G.shape[0])]).T), axis=1)\n ages = np.concatenate((ages, np.array([np.zeros(ages.shape[0])])), axis=0)\n ages = np.concatenate((ages, np.array([np.zeros(ages.shape[0])]).T), axis=1)\n errors = np.append(errors, 0)\n\n\n emptyNodes = np.where(G.sum(axis=0)<=1)\n\n W = np.delete(W, emptyNodes, axis=0)\n Y = np.delete(Y, emptyNodes, axis=0)\n G = np.delete(G, emptyNodes, axis=0)\n G = np.delete(G, emptyNodes, axis=1)\n ages = np.delete(ages,emptyNodes,axis=0)\n ages = np.delete(ages,emptyNodes,axis=1)\n errors = np.delete(errors, emptyNodes)\n\n\n\n#Projection step\n\n\nprint 'projecting : '\n# print len(np.where(G.sum(axis=1)==0)[0])\n#\nfor i in range(2000):\n\n for k in range(W.shape[0]):\n b = W[k]\n # if i < 100000:\n # neis = np.argsort(np.linalg.norm(W[k] - W, axis=1))[:5]\n # else:\n neis = np.where(G[k]==1)[0]#np.argsort(np.linalg.norm(W[k]-W, axis=1))[:5]\n\n D = np.linalg.norm(W[k]-W[neis], axis=1)\n\n d = np.linalg.norm(Y[k] - Y[neis], axis=1)\n\n # D/=D.sum()\n # d/=d.sum()\n dirs = d-D\n\n Y[neis] += .50*(np.exp(-0.5*i/2000.)) * (Y[k]- Y[neis]) * np.array([dirs]).T\n Y -= Y.min()\n Y /= Y.max()\n# print ages\n\n#### Visualize Neural Gas\n\nfig = plt.figure()\nax = fig.add_subplot(111, projection='3d')\n# ax.scatter(X.T[0], X.T[1],X.T[2], c=c, cmap=plt.cm.hsv, alpha=0.2)\nax.scatter(W.T[0], W.T[1],W.T[2], c=range(W.shape[0]), cmap=plt.cm.hsv, alpha=0.8)\n\n\nfor i in range(G.shape[0]):\n for j in range(G.shape[1]):\n if G[i, j]:\n ax.plot([W[i, 0], W[j, 0]], [W[i, 1], W[j, 1]],[W[i,2], W[j,2]], c='black')\n\n\n\nplt.show()\n\n# pickle.dump(fig, open('FigureObject.fig.pickle','wb'))\n#\nplt.scatter(Y.T[0], Y.T[1], c=range(Y.shape[0]), cmap= plt.cm.hsv, alpha = 0.4)\n\nfor i in range(G.shape[0]):\n for j in range(G.shape[1]):\n if G[i, j]:\n plt.plot([Y[i, 0], Y[j, 0]], [Y[i, 1], Y[j, 1]], c='black')\nplt.show()\n" }, { "alpha_fraction": 0.44013428688049316, "alphanum_fraction": 0.4740768373012543, "avg_line_length": 24.30188751220703, "blob_id": "fcd2bb2084d99f560927a60287799fb8cf989de3", "content_id": "5509e704865f2da2057cece244a00d25e837cceb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2681, "license_type": "no_license", "max_line_length": 124, "num_lines": 106, "path": "/GNG.py", "repo_name": "damithsenanayake/GNG", "src_encoding": "UTF-8", "text": "import numpy as np\nfrom sklearn.datasets import make_blobs,make_circles, make_s_curve, make_moons\nimport matplotlib.pyplot as plt\n\ng_size = 10\n\nG = np.zeros(shape=(g_size, g_size))#np.identity(g_size)\n\nages = np.zeros(shape=G.shape)\n\nerrors = np.zeros(g_size)\n\nW = np.random.random(size=(g_size,2))\n\nX, c = make_blobs(n_samples= 500, centers= 5, n_features=2, random_state=1, cluster_std=0.5) #make_moons(n_samples=500)#\n## Train Neural Gas\n\nX -= X.min()\nX /= X.max()\n\nalpha = 0.1\na_max = 100\n\nfor i in range(10000):\n\n for x in X:\n dists = np.linalg.norm(W-x, axis=1)\n s, t = np.argsort(dists)[:2]\n\n try:\n errors[s] += dists[s]**2\n except:\n np.append(errors, dists[s]**2)\n\n\n\n neis = np.where(G[s])[0]\n\n d = dists[neis]\n d /= (d.sum()+0.00001)\n W[neis] += 0.01*(x-W[neis])#*np.array([np.exp(-0.5*d**2)]).T\n\n ages[s]+=G[s]\n\n ages[s][s]=0\n\n G[s][ages[s]>a_max]=0\n ages[s][ages[s]>a_max]=0\n\n if G[s][t]:\n ages[s, t] =0\n else:\n G[s][t] = 1\n ages[s][t]=0\n\n\n\n if i%50 ==48 and i < 9000:\n if errors[s] 
> 1:\n try:\n l = np.argmax(errors[G[s]==1])\n except IndexError:\n print errors\n W_n = W[l] + W[s]\n W_n *= 0.5\n errors[l]*= alpha\n errors[s]*= alpha\n W=np.concatenate((W, np.array([W_n])), axis=0)\n\n G = np.concatenate((G, np.array([np.zeros(G.shape[0])])), axis=0)\n G = np.concatenate((G, np.array([np.zeros(G.shape[0])]).T), axis=1)\n ages = np.concatenate((ages, np.array([np.zeros(ages.shape[0])])), axis=0)\n ages = np.concatenate((ages, np.array([np.zeros(ages.shape[0])]).T), axis=1)\n errors = np.append(errors, 0)\n\n\n GT = G\n agesT = ages\n errorsT = errors\n WT = W\n for s in range(G.shape[0]):\n if G[s].sum() == 0:\n WT=np.delete(W, s, 0)\n GT=np.delete(G, s, 0)\n GT=np.delete(GT, s, 1)\n agesT=np.delete(ages, s, 0)\n agesT=np.delete(agesT, s, 1)\n errorsT = np.delete(errors, s)\n\n G = GT\n ages = agesT\n errors = errorsT\n W = WT\n \n# print ages\n\n#### Visualize Neural Gas\nplt.scatter(X.T[0], X.T[1], c=c, cmap=plt.cm.Set1, alpha=0.2)\nplt.scatter(W.T[0], W.T[1], c='black', alpha=0.2)\n\nfor i in range(G.shape[0]):\n for j in range(G.shape[1]):\n if G[i,j]:\n plt.plot([W[i,0], W[j, 0]], [W[i,1],W[j, 1]], c='black')\n\nplt.show()" }, { "alpha_fraction": 0.4507438540458679, "alphanum_fraction": 0.4831121861934662, "avg_line_length": 26.486186981201172, "blob_id": "fe809339898dea746ee01117b6f1471b7bc4bc19", "content_id": "187a2801d4cf38663183e27bd47ad9f7525c23ad", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4974, "license_type": "no_license", "max_line_length": 104, "num_lines": 181, "path": "/HLGSOM.py", "repo_name": "damithsenanayake/GNG", "src_encoding": "UTF-8", "text": "import numpy as np\nfrom sklearn.datasets import make_swiss_roll, make_s_curve\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\n\nX, t = make_s_curve(n_samples=500, random_state=100)#make_swiss_roll(n_samples=500,random_state=10)\n\nW = np.random.random((3, X.shape[1]))\nY = np.random.random((3, 2))\n\nG = np.zeros((W.shape[0], W.shape[0]))\nerrors = np.zeros(3)\namax =2\nages = np.zeros(G.shape)\n\nlrst = 0.5\nalpha = 0.2\nfor i in range(2000):\n print '\\riteration ', i+1, ' n(G) = ', G.shape[0],\n for x in X:\n lr = lrst*np.exp(-.5*i**2/50.**2)\n k = np.argmin(np.linalg.norm(W-x, axis=1))\n l, m = np.argsort(np.linalg.norm(W[k]-W, axis=1))[1:3]\n\n # hneis = np.argsort(np.linalg.norm(x-W, axis=1))[1:3]\n\n\n # G[l][m]=1\n # G[m][l]=1\n # if not np.in1d(hneis, l).any() :\n # G[k][l]=0\n # G[l][k]=0\n #\n # if not np.in1d(hneis, m).any() :\n # G[k][m] =0\n # G[m][k]=0\n #\n\n # ages[l][m] = 0\n # ages[m][l] = 0\n\n err = np.linalg.norm(x-W[k])\n W[k] += (x - W[k])*lr\n\n neis = np.where(G[k] == 1)[0]\n non_neis = np.array(range(Y.shape[0]))#np.setdiff1d(np.array(range(Y.shape[0])), neis)#\n G[k][l] = 1\n G[l][k] = 1\n G[m][k] = 1\n G[k][m] = 1\n ages[k][neis] +=1\n ages[neis][:,k] +=1\n\n # if np.in1d(hneis, l).any():\n ages[k][l] = 0\n ages[l][k] = 0\n # # if np.in1d(hneis, m).any():\n ages[m][k] = 0\n ages[k][m] = 0\n # ages[k][hneis]=0\n # ages[:, k][hneis]=0\n\n d = np.linalg.norm(W[k]-W[neis], axis=1)\n d_n = np.linalg.norm(W[k]-W[non_neis], axis=1)\n\n # d /= d.max()\n # d_n /= d_n.max()\n\n theta = np.nan_to_num(np.array([np.exp(-0.5*d**2)]).T)\n theta = np.nan_to_num(theta/theta.sum())\n\n # if np.isinf(1./d_n).any() or np.isnan(1./d_n).any():\n # print 'hold1'\n\n theta_n = np.nan_to_num(np.array([1./d_n**2]).T)#np.nan_to_num(np.array([np.exp(-.5*d_n**2)]).T)\n theta_n = 
np.nan_to_num(theta_n/theta_n.max())\n push = (Y[k]-Y[non_neis]) * theta_n\n\n W[neis] += (W[k]-W[neis])*lr#*theta\n\n\n\n Dw = np.linalg.norm(W[neis]-W[k], axis=1)\n dy = np.linalg.norm(Y[neis]-Y[k], axis=1)\n\n dirs = dy - Dw\n pull = np.array([dirs]).T * (Y[k]-Y[neis]) * lr# * theta\n\n if np.isnan(pull).any() or np.isinf(pull).any():\n print 'hold'\n continue\n\n Y[neis] += pull\n\n\n # Y[neis] += (Y[k]-Y[neis])*lr*theta\n if np.isnan(push).any() or np.isinf(push).any():\n print 'hold'\n continue\n Y[non_neis] -= push\n\n errors[k] += err**2\n\n ### Growth ####\n\n if errors[k] >=1 and i%2==0:\n errors[k]*= alpha\n errors[neis] += errors[neis]*alpha\n\n W_n = W[l] + W[m] + W[k]\n W_n /= 3.\n W_n = 2*W[k] - W_n\n\n Y_n = Y[l] + Y[m] + Y[k]\n Y_n /= 3.\n\n Y_n = 2*Y[k] - Y_n\n\n W = np.concatenate((W,np.array([W_n])), axis=0)\n Y = np.concatenate((Y, np.array([Y_n])), axis=0)\n\n errors = np.concatenate((errors, np.array([0])))\n\n G = np.concatenate((G, np.zeros((G.shape[0], 1))), axis=1)\n G = np.concatenate((G, np.zeros((G.shape[1], 1)).T), axis=0)\n\n ages = np.concatenate((ages, np.zeros((ages.shape[0], 1))), axis=1)\n ages = np.concatenate((ages, np.zeros((ages.shape[1], 1)).T), axis=0)\n\n\n ### edge deletion ###\n\n G[k][ages[k] >= amax] = 0\n ages[k][ages[k] >= amax] = 0\n G[:, k][ages[:,k] >= amax] = 0\n ages[:, k][ages[:, k] >= amax] = 0\n\n ### node deletion ###\n\n delcands = np.where(G.sum(axis=1)<=1)[0]\n\n ages = np.delete(ages, delcands, axis=0)\n ages = np.delete(ages,delcands, axis=1)\n G = np.delete(G, delcands, axis=0)\n G= np.delete(G, delcands, axis=1)\n Y = np.delete(Y, delcands, axis=0)\n W = np.delete(W, delcands, axis=0)\n errors = np.delete(errors, delcands)\n\n\nprint ages.max()\nfig = plt.figure()\nax = fig.add_subplot(111, projection='3d')\nax.scatter(X.T[0], X.T[1],X.T[2], c=t, cmap=plt.cm.hsv, alpha=0.2)\nax.scatter(W.T[0], W.T[1],W.T[2], c=range(W.shape[0]), cmap=plt.cm.hsv, alpha=0.8)\n\n\nfor i in range(G.shape[0]):\n for j in range(G.shape[1]):\n if G[i, j]:\n ax.plot([W[i, 0], W[j, 0]], [W[i, 1], W[j, 1]],[W[i,2], W[j,2]], c='black')\n\nplt.show()\n\nplt.scatter(Y.T[0], Y.T[1], c=range(Y.shape[0]), cmap= plt.cm.hsv, alpha = 0.4)\n\nfor i in range(G.shape[0]):\n for j in range(G.shape[1]):\n if G[i, j]:\n plt.plot([Y[i, 0], Y[j, 0]], [Y[i, 1], Y[j, 1]], c='black')\nplt.show()\n\npredictions =[]\n\nfor x in X:\n predictions.append(Y[np.argmin(np.linalg.norm(W-x, axis=1))])\n\ndisp = np.array(predictions)\n\nplt.scatter(disp.T[0], disp.T[1], c=t, cmap=plt.cm.hsv, alpha = 0.4)\nplt.show()" } ]
7
AndreAgel94/RTSP-RTP-Stream
https://github.com/AndreAgel94/RTSP-RTP-Stream
2c9db40e433f590343f9b5fdfdb02d235fef2f05
fd2528e8932e04e71936a80242673f956e933b88
171a2d583345f82fc7d4e28fc43f04b03128cb45
refs/heads/master
2020-08-26T19:14:27.726409
2019-10-23T19:31:37
2019-10-23T19:31:37
217,115,956
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.7572559118270874, "alphanum_fraction": 0.7709762454032898, "avg_line_length": 33.436363220214844, "blob_id": "47744f4749b0ee51eb98e73323a0db44f8396244", "content_id": "56458d6e5da781f2d2e0f95547cdf109aa57dab3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1955, "license_type": "no_license", "max_line_length": 241, "num_lines": 55, "path": "/README.md", "repo_name": "AndreAgel94/RTSP-RTP-Stream", "src_encoding": "UTF-8", "text": "# RTSP-RTP-Stream\n\nautores:\n\n.André Lannes Bernardes Agel \n\n.Samuel Fernandes Terra dos Reis\n\nUma implementaçao python da tarefa de programação do capítulo Redes Multimídia do livro do kurose: \"Computer Networking: A Top-Down Approach\". [programming assignment 7](http://media.pearsoncmg.com/aw/aw_kurose_network_3/labs/lab7/lab7.html)\n \nA aplicação implementa um servidor de vídeo streaming e um cliente, comunicando-se usando o protocolo de transmissã em tempo real(RTSP) e o protocolo de transferência em tempo real(RTP)\n\n![Demonstration](Streaming.gif)\n\n## Usando\n\nClone o repositório $git clone https://github.com/AndreAgel94/RTSP-RTP-Stream\n\n\tAbra um terminal:\n \t\t python Server.py 1025\n\n\tEm um outro terminal:\n \t\t python ClientLauncher.py 127.0.0.1 1025 5008 video.mjpeg\n \n# Funções\n\nBotão SETUP:\n* Envia a solicitação p/ configuração do servidor\n* Insere o cabeçalho de transporte(especificando a porta do socket de dados RTP criado)\n* RTP - Protocolo de Transporte em tempo real\n* Escuta/lê a resposta do servidor\n* Analisa o cabeçalho de sessão (da resposta) pra pegar a id da sessão RTSP\n* Cria um socket datagrama para receber os dados RTP\n* Coloca o timeout nesse socket de 0.5 segundos\n\nBotão PLAY:\n* Envia a solicitação para dar play no vídeo\n* Insere o cabeçalho de sessão\n* Usa a id da sessão (que foi retornada na fase de configuração)\n* Não colocar o cabeçalho do transporte nesta requisição\n* escuta a resposta do servidor\n\nBotao PAUSE:\n* Envia a solicitação para pausar o vídeo\n* Insere o cabeçalho de sessão\n* Usa a id da sessão (que foi retornada na fase de configuração)\n* Não colocar o cabeçalho do transporte nesta requisição\n* escuta a resposta do servidor\n\nBotao TEARDOWN:\n* Envia a solicitação para encerrar o vídeo\n* Insere o cabeçalho de sessão\n* Usa a id da sessão (que foi retornada na fase de configuração)\n* Não colocar o cabeçalho do transporte nesta requisição\n* escuta a resposta do servidor\n\n" }, { "alpha_fraction": 0.4406196177005768, "alphanum_fraction": 0.47504302859306335, "avg_line_length": 27.341463088989258, "blob_id": "a6d2d612c34179b7db719172588285f3168453b5", "content_id": "99df5fe7e90911bd980ceccb742cbb498fe05dfd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1162, "license_type": "no_license", "max_line_length": 114, "num_lines": 41, "path": "/VideoStream.py", "repo_name": "AndreAgel94/RTSP-RTP-Stream", "src_encoding": "UTF-8", "text": "import struct\n\n\nclass VideoStream:\n def __init__(self, filename):\n self.filename = filename\n try:\n self.file = open(filename, 'rb')\n print '-'*60 + \"\\nVideo file : |\" + filename + \"| read\\n\" + '-'*60\n except:\n print \"read \" + filename + \" error\"\n raise IOError\n self.frameNum = 0\n\n def nextFrame(self):\n \"\"\"Get next frame.\"\"\"\n\n data = self.file.read(5)\n data = bytearray(data)\n\n data_int = (data[0] - 48) * 10000 + (data[1] - 48) * 1000 + (data[2] - 48) * \\\n 100 + (data[3] 
- 48) * 10 + (data[4] -\n 48)\n\n final_data_int = data_int\n\n if data:\n\n framelength = final_data_int\n frame = self.file.read(framelength)\n if len(frame) != framelength:\n raise ValueError('incomplete frame data')\n\n self.frameNum += 1\n print '-'*10 + \"\\nNext Frame (#\" + str(self.frameNum) + \") length:\" + str(framelength) + \"\\n\" + '-'*10\n\n return frame\n\n def frameNbr(self):\n \"\"\"Get frame number.\"\"\"\n return self.frameNum\n" } ]
2
wsg011/kt
https://github.com/wsg011/kt
e125fd0c86492a932583550f412497db8514f8a4
c30b1bd1a7599fcdfa36c05a8b734a145aa3717f
9de728af3fda85182de58a9e7d246d8aad820f35
refs/heads/master
2023-01-22T07:44:55.659233
2021-03-16T02:02:41
2021-03-16T02:02:41
305,616,599
1
1
null
null
null
null
null
[ { "alpha_fraction": 0.5674846768379211, "alphanum_fraction": 0.5829835534095764, "avg_line_length": 28.908212661743164, "blob_id": "5145023c575115038501847ee80a3d8b43a71006", "content_id": "89be8a94e4649ed172e1801a6a657d8ee2d42eb0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6194, "license_type": "no_license", "max_line_length": 114, "num_lines": 207, "path": "/examples/train_dkt.py", "repo_name": "wsg011/kt", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- encoding: utf-8 -*-\n'''\n@Author : wsg011\n@Email : [email protected]\n@Time : 2020/10/20 16:08:36\n@Desc : \n'''\nimport os\nimport sys\nimport logging\nimport argparse\nimport itertools\nimport numpy as np\nimport pandas as pd\nfrom tqdm import tqdm\nfrom sklearn.metrics import roc_auc_score\nfrom sklearn.model_selection import train_test_split\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.utils.rnn as rnn_utils\nfrom torch.utils.data import DataLoader, Dataset\n\nsys.path.append(\"../\")\nfrom torchkt.model import DKTModel\n\nlogger = logging.Logger(__name__)\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--batch_size\", default=1024, help=\"data generator size\")\nparser.add_argument(\"--dataset\", default=\"assistments\", help=\"training dataset name\")\nparser.add_argument(\"--epochs\", default=50, help=\"training epoch numbers\")\nparser.add_argument(\"--lr\", default=0.001, help=\"learning rate\")\nparser.add_argument(\"--model\", default=\"dkt\", help=\"train model\")\nparser.add_argument(\"--max_seq\", default=100, help=\"max question answer sequence length\")\nparser.add_argument(\"--root\", default=\"../data\", help=\"dataset file path\")\nargs = parser.parse_args()\n\n\nclass DKTDataset(Dataset):\n def __init__(self, fn, n_skill, max_seq=100):\n super(DKTDataset, self).__init__()\n self.n_skill = n_skill\n self.max_seq = max_seq\n\n self.user_ids = []\n self.samples = []\n with open(fn, \"r\") as csv_f:\n for student_id, q, qa in itertools.zip_longest(*[csv_f] * 3):\n student_id = int(student_id.strip())\n q = [int(x) for x in q.strip().split(\",\") if x]\n qa = [int(x) for x in qa.strip().split(\",\") if x]\n\n assert len(q) == len(qa)\n if len(q) <= 2:\n continue\n\n self.user_ids.append(student_id)\n self.samples.append((q, qa))\n\n def __len__(self):\n return len(self.user_ids)\n\n def __getitem__(self, index):\n user_id = self.user_ids[index]\n q_, qa_ = self.samples[index]\n seq_len = len(q_)\n\n q = np.zeros(self.max_seq, dtype=int)\n qa = np.zeros(self.max_seq, dtype=int)\n if seq_len >= self.max_seq:\n q[:] = q_[-self.max_seq:]\n qa[:] = qa_[-self.max_seq:]\n else:\n q[-seq_len:] = q_\n qa[-seq_len:] = qa_\n\n target_id = q[-1]\n label = qa[-1]\n\n q = q[:-1].astype(np.int)\n qa = qa[:-1].astype(np.int)\n x = q[:-1]\n x += (qa[:-1] == 1) * self.n_skill\n\n target_id = np.array([target_id]).astype(np.int)\n label = np.array([label]).astype(np.int)\n\n return x, target_id, label \n\n\ndef train(model, train_iterator, optim, criterion, device=\"cpu\"):\n model.train()\n\n train_loss = []\n num_corrects = 0\n num_total = 0\n labels = []\n outs = []\n\n tbar = tqdm(train_iterator)\n for item in tbar:\n x = item[0].to(device).long()\n target_id = item[1].to(device).long()\n label = item[2].to(device).float()\n\n optim.zero_grad()\n output = model(x)\n\n output = torch.gather(output, -1, target_id)\n pred = (torch.sigmoid(output) >= 0.5).long()\n \n loss = criterion(output, label)\n loss.backward()\n optim.step()\n\n 
train_loss.append(loss.item())\n num_corrects += (pred == label).sum().item()\n num_total += len(label)\n\n labels.extend(label.squeeze(-1).data.cpu().numpy())\n outs.extend(output.squeeze(-1).data.cpu().numpy())\n\n tbar.set_description('loss - {:.4f}'.format(loss))\n\n\n acc = num_corrects / num_total\n auc = roc_auc_score(labels, outs)\n loss = np.mean(train_loss)\n\n return loss, acc, auc\n\n\ndef validation(model, val_iterator, criterion, device):\n model.eval()\n\n val_loss = []\n num_corrects = 0\n num_total = 0\n labels = []\n outs = []\n\n tbar = tqdm(val_iterator)\n for item in tbar:\n x = item[0].to(device).long()\n target_id = item[1].to(device).long()\n label = item[2].to(device).float()\n\n with torch.no_grad():\n output = model(x)\n \n output = torch.gather(output, -1, target_id)\n\n pred = (torch.sigmoid(output) >= 0.5).long()\n loss = criterion(output, label)\n\n val_loss.append(loss.item())\n num_corrects += (pred == label).sum().item()\n num_total += len(label)\n\n labels.extend(label.squeeze(-1).data.cpu().numpy())\n outs.extend(output.squeeze(-1).data.cpu().numpy())\n\n tbar.set_description('loss - {:.4f}'.format(loss))\n\n acc = num_corrects / num_total\n auc = roc_auc_score(labels, outs)\n loss = np.mean(val_loss)\n\n return loss, acc, auc\n\n\nif __name__ == \"__main__\":\n path = os.path.join(args.root, args.dataset)\n\n if args.dataset == \"riid\":\n n_skill = 13523\n elif args.dataset == \"assistments\":\n n_skill = 124\n else:\n raise KeyError(\"dataset error\")\n\n train_dataset = DKTDataset(path+\"/train.csv\", max_seq=100, n_skill=n_skill)\n val_dataset = DKTDataset(path+\"/val.csv\", max_seq=100, n_skill=n_skill)\n\n train_dataloader = DataLoader(train_dataset, batch_size=args.batch_size, shuffle=True, num_workers=8)\n \n val_dataloader = DataLoader(val_dataset, batch_size=args.batch_size, shuffle=True, num_workers=8)\n \n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n model = DKTModel(n_skill)\n # optimizer = torch.optim.SGD(model.parameters(), lr=1e-3, momentum=0.99, weight_decay=0.005)\n optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)\n criterion = nn.BCEWithLogitsLoss()\n\n model.to(device)\n criterion.to(device)\n\n epochs = args.epochs\n for epoch in range(epochs):\n loss, acc, auc = train(model, train_dataloader, optimizer, criterion, device)\n print(\"epoch - {} train_loss - {:.2f} acc - {:.3f} auc - {:.3f}\".format(epoch, loss, acc, auc))\n\n val_loss, val_acc, val_auc = validation(model, val_dataloader, criterion, device)\n print(\"epoch - {} vall_loss - {:.2f} acc - {:.3f} auc - {:.3f}\".format(epoch, val_loss, val_acc, val_auc))\n\n\n\n" }, { "alpha_fraction": 0.6179245114326477, "alphanum_fraction": 0.6179245114326477, "avg_line_length": 20.299999237060547, "blob_id": "e6cb4c499c652b54e55bb2ce5b85c8a1fa211a0f", "content_id": "d4b9e5c93f2597edfd1bc1cd2e03592769869feb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 212, "license_type": "no_license", "max_line_length": 60, "num_lines": 10, "path": "/torchkt/model/base_model.py", "repo_name": "wsg011/kt", "src_encoding": "UTF-8", "text": "import torch\nimport torch.nn as nn\n\n\nclass BaseModel(nn.Module):\n def __init__(self):\n super(BaseModel, self).__init__()\n\n def fit(self, x_train, y_train, x_val=None, y_val=None):\n return None" }, { "alpha_fraction": 0.5328443646430969, "alphanum_fraction": 0.5548469424247742, "avg_line_length": 29.163461685180664, "blob_id": 
"211551a2ce63584a1ace8b506a1e344756c07200", "content_id": "668de5c22752902c66488c8ea3904b7c7a814a41", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3136, "license_type": "no_license", "max_line_length": 103, "num_lines": 104, "path": "/examples/sakt/model/sakt.py", "repo_name": "wsg011/kt", "src_encoding": "UTF-8", "text": "import numpy as np\n\nimport torch\nimport torch.nn as nn\n\n\ndef future_mask(seq_length):\n future_mask = np.triu(np.ones((seq_length, seq_length)), k=1).astype('bool')\n return torch.from_numpy(future_mask)\n\n\nclass FFN(nn.Module):\n def __init__(self, state_size=200):\n super(FFN, self).__init__()\n self.state_size = state_size\n\n self.lr1 = nn.Linear(state_size, state_size)\n self.relu = nn.ReLU()\n self.lr2 = nn.Linear(state_size, state_size)\n self.dropout = nn.Dropout(0.2)\n \n def forward(self, x):\n x = self.lr1(x)\n x = self.relu(x)\n x = self.lr2(x)\n return self.dropout(x)\n\n\nclass SAKTModel(nn.Module):\n def __init__(self, n_skill, max_seq=100, embed_dim=200):\n super(SAKTModel, self).__init__()\n self.n_skill = n_skill\n self.embed_dim = embed_dim\n\n self.embedding = nn.Embedding(2*n_skill+1, embed_dim)\n self.pos_embedding = nn.Embedding(max_seq-1, embed_dim)\n self.e_embedding = nn.Embedding(n_skill+1, embed_dim)\n\n self.multi_att = nn.MultiheadAttention(embed_dim=embed_dim, num_heads=8, dropout=0.2)\n\n self.dropout = nn.Dropout(0.2)\n self.layer_normal = nn.LayerNorm(embed_dim) \n\n self.ffn = FFN(embed_dim)\n self.pred = nn.Linear(embed_dim, 1)\n self.sigmoid = nn.Sigmoid()\n\n self._reset_parameters()\n \n def _reset_parameters(self):\n r\"\"\"Initiate parameters in the model.\"\"\"\n for p in self.parameters():\n if p.dim() > 1:\n nn.init.xavier_uniform_(p)\n\n def forward(self, x, question_ids):\n device = x.device \n # src_pad_mask = (x == 0)\n # tgt_pad_mask = (question_ids == 0)\n # mask = src_pad_mask & tgt_pad_mask\n\n x = self.embedding(x)\n pos_id = torch.arange(x.size(1)).unsqueeze(0).to(device)\n\n pos_x = self.pos_embedding(pos_id)\n x = x + pos_x\n\n e = self.e_embedding(question_ids)\n\n x = x.permute(1, 0, 2) # x: [bs, s_len, embed] => [s_len, bs, embed]\n e = e.permute(1, 0, 2)\n att_mask = future_mask(x.size(0)).to(device)\n att_output, att_weight = self.multi_att(e, x, x, attn_mask=att_mask)\n att_output = self.layer_normal(att_output + e)\n att_output = att_output.permute(1, 0, 2) # att_output: [s_len, bs, embed] => [bs, s_len, embed]\n # print(att_output.shape, att_weight.shape)\n x = self.ffn(att_output)\n # x = self.dropout(x)\n x = self.layer_normal(x + att_output)\n x = self.pred(x)\n\n return self.sigmoid(x.squeeze(-1)), att_weight\n\n\nif __name__ == \"__main__\":\n q = torch.zeros((2, 9)).long()\n qa = torch.zeros((2, 9)).long()\n\n q_ = torch.randint(0, 100, size=(2, 6))\n qa_ = torch.randint(0, 2, size=(2, 6))\n\n q[:, -6:] = q_\n qa[:, -6:] = qa_\n\n x = q[:, :-1].clone()\n x += (qa[:, :-1] == 1) * 100\n e = q[:, -1].clone()\n question_ids = q[:, 1:].clone()\n\n model = SAKTModel(n_skill=100)\n print(x.shape, question_ids.shape)\n ouput, att_weight = model(x, question_ids)\n print(ouput.shape)\n print(att_weight)" }, { "alpha_fraction": 0.5711695551872253, "alphanum_fraction": 0.6001813411712646, "avg_line_length": 26.600000381469727, "blob_id": "197c07fedca3247d768152a2ec0fbb5f501331bb", "content_id": "4db859eef2db8be30d7ee8d73866655cb1399b14", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 
1103, "license_type": "no_license", "max_line_length": 99, "num_lines": 40, "path": "/torchkt/model/dkt.py", "repo_name": "wsg011/kt", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- encoding: utf-8 -*-\n'''\n@Author  :   wsg011\n@Email   :   [email protected]\n@Time    :   2021/03/15 17:58:19\n@Desc    :   Deep Neural Network for Knowledge Tracing\n'''\nimport torch\nimport torch.nn as nn\nfrom torch.autograd import Variable\n\nfrom .base_model import BaseModel\n\n\nclass DKTModel(BaseModel):\n    def __init__(self, n_skill, hidden_size=100, emb_dim=100):\n\n        super(DKTModel, self).__init__()\n        self.n_skill = n_skill\n        self.hidden_size = hidden_size\n\n        self.embedding = nn.Embedding(2*n_skill+1, emb_dim)\n\n        self.lstm = nn.LSTM(emb_dim, hidden_size, batch_first=True, dropout=0.2)\n\n        self.pred = nn.Linear(hidden_size, n_skill)\n\n    def forward(self, x):\n        bs = x.size(0)\n        device = x.device\n        hidden = Variable(torch.zeros(1, bs, self.hidden_size)).to(device)\n        cell = Variable(torch.zeros(1, bs, self.hidden_size)).to(device)\n\n        x = self.embedding(x)\n\n        x, _ = self.lstm(x, (hidden, cell))  # lstm output: [bs, seq_len, hidden], hidden: [bs, hidden]\n        x = self.pred(x[:, -1, :])\n\n        return x" }, { "alpha_fraction": 0.28140702843666077, "alphanum_fraction": 0.37688443064689636, "avg_line_length": 23.75, "blob_id": "9eb730cc47cdd3a6d97b7fd8db263a259cf8a50f", "content_id": "d3ae7d6881b748ab3825d206778bad322216f205", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 199, "license_type": "no_license", "max_line_length": 38, "num_lines": 8, "path": "/readme.md", "repo_name": "wsg011/kt", "src_encoding": "UTF-8", "text": "# Knowledge Tracing\n\n## Model Evaluation\n\n|         | ASSISTments2009 | riid   |\n| ------- | --------------- | ------ |\n| DeepKT  | 0.776           | 0.693  |\n| SAKT    | 0.78            | 0.746  |\n\n" },
{ "alpha_fraction": 0.5631712079048157, "alphanum_fraction": 0.6001813411712646, "avg_line_length": 28.59813117980957, "blob_id": "c4bb890facfe86f4d40afaf5150fa232e76a9b42", "content_id": "8597d19b765dc042030b5f63400550631c0d1c8a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3166, "license_type": "no_license", "max_line_length": 95, "num_lines": 107, "path": "/scripts/prepare_data.py", "repo_name": "wsg011/kt", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- encoding: utf-8 -*-\n'''\n@Author  :   wsg011\n@Email   :   [email protected]\n@Time    :   2020/10/20 15:27:46\n@Desc    :   \n'''\nimport os\nimport csv\nimport argparse\nimport pandas as pd\nfrom sklearn.model_selection import train_test_split\n\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--root\", default=\"../data\")\nparser.add_argument(\"--dataset\", default=\"assistments2009\", type=str)\nparser.add_argument(\"--input\", default=\"skill_builder_data_corrected.csv\", type=str)\nargs = parser.parse_args()\n\n\ndef save_file(data, file_name):\n    csv_file = open(file_name, \"w\")\n    # csv_file.write(\"user_id,skill_id,correct\\n\")\n\n    for user_id in data.index:\n        sample = data[user_id]\n\n        q = sample[0]\n        qa = sample[1]\n\n        if len(q) < 2:\n            continue\n\n        user_id = str(int(user_id))\n        q = [str(int(x)) for x in q]\n        qa = [str(int(x)) for x in qa]\n        csv_file.write(user_id+\"\\n\")\n        csv_file.write(\",\".join(q)+\"\\n\")\n        csv_file.write(\",\".join(qa)+\"\\n\")\n\n    csv_file.close()\n\n    return True\n\nif __name__ == \"__main__\":\n    path = os.path.join(args.root, args.dataset, args.input)\n\n    if args.dataset in [\"assistments2009\", \"assistments2012\", \"assistments2015\"]:\n        df = pd.read_csv(path)\n\n        data = pd.DataFrame()\n        data[\"user_id\"] = df[\"user_id\"]\n        data[\"skill_id\"] = df[\"skill_id\"]\n        data[\"correct\"] = df[\"correct\"]\n        data = data.dropna()\n\n    elif args.dataset == \"riid\":\n        dtype = {'timestamp': 'int64', 'user_id': 'int32',\n             'content_id': 'int16', 'content_type_id': 'int8',\n             'answered_correctly': 'int8'}\n\n        train_df = pd.read_csv(path, usecols=[1, 2, 3, 4, 7], dtype=dtype)\n        train_df = train_df[train_df.content_type_id == False]\n        train_df = train_df.sort_values(['timestamp'], ascending=True).reset_index(drop=True)\n\n        data = pd.DataFrame()\n        data[\"user_id\"] = train_df[\"user_id\"]\n        data[\"skill_id\"] = train_df[\"content_id\"]\n        data[\"correct\"] = train_df[\"answered_correctly\"]\n\n    elif args.dataset == \"xkl\":\n        df = pd.read_csv(path)\n\n        data = df\n\n    else:\n        raise KeyError(\"can't get dataset name\")\n\n    user_ids = data[\"user_id\"].unique()\n\n    skill_ids = data[\"skill_id\"].unique()\n    skill_ids = [int(x) for x in skill_ids]\n    skill_ids = sorted(skill_ids)\n\n    group = data.groupby('user_id').apply(lambda r: (\n        r['skill_id'].values,\n        r['correct'].values))\n\n    train, val = train_test_split(group, test_size=0.3)\n\n    # save skill_id\n    skill_df = pd.DataFrame(skill_ids, columns=[\"skill_id\"])\n    skill_df.to_csv(os.path.join(args.root, args.dataset, \"skills.csv\"), index=False)\n\n    # save user_id\n    user_ids = [int(x) for x in user_ids]\n    user_df = pd.DataFrame(user_ids, columns=[\"user_id\"])\n    user_df.to_csv(os.path.join(args.root, args.dataset, \"users.csv\"), index=False)\n\n    # save train and val\n    train_fn = os.path.join(args.root, args.dataset, \"train.csv\")\n    save_file(train, train_fn)\n\n    val_fn = os.path.join(args.root, args.dataset, \"val.csv\")\n    save_file(val, val_fn)" },
{ "alpha_fraction": 0.5995340943336487, "alphanum_fraction": 0.6188055872917175, "avg_line_length": 29.843137741088867, "blob_id": "611432360f1a560fefc130e61b06cbec007a5fc2", "content_id": "bab5a18e0d3bd8946c7acb27f6c0a67157bd4a8d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4722, "license_type": "no_license", "max_line_length": 114, "num_lines": 153, "path": "/examples/sakt/train.py", "repo_name": "wsg011/kt", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- encoding: utf-8 -*-\n'''\n@Author  :   wsg011\n@Email   :   [email protected]\n@Time    :   2020/10/20 16:08:36\n@Desc    :   \n'''\nimport os\nimport logging\nimport argparse\nimport numpy as np\nimport pandas as pd\nfrom tqdm import tqdm\nfrom sklearn.metrics import roc_auc_score\nfrom sklearn.model_selection import train_test_split\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.utils.rnn as rnn_utils\nfrom torch.utils.data import DataLoader\n\nfrom dataset import SAKTDataset\nfrom model.sakt import SAKTModel\n\nlogger = logging.getLogger(__name__)\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--batch_size\", default=64, type=int, help=\"data generator size\")\nparser.add_argument(\"--dataset\", default=\"assistments\", help=\"training dataset name\")\nparser.add_argument(\"--epochs\", default=50, type=int, help=\"training epoch numbers\")\nparser.add_argument(\"--lr\", default=0.001, type=float, help=\"learning rate\")\nparser.add_argument(\"--model\", default=\"dkt\", help=\"train model\")\nparser.add_argument(\"--max_seq\", default=100, type=int, help=\"max question answer sequence length\")\nparser.add_argument(\"--n_skill\", default=124, type=int, help=\"training dataset 
size\")\nparser.add_argument(\"--root\", default=\"../data\", help=\"dataset file path\")\nargs = parser.parse_args()\n\n\ndef train(model, train_iterator, optim, criterion, device=\"cpu\"):\n model.train()\n\n train_loss = []\n num_corrects = 0\n num_total = 0\n labels = []\n outs = []\n\n tbar = tqdm(train_iterator)\n for item in tbar:\n x = item[0].to(device).long()\n questions = item[1].to(device).long()\n label = item[2].to(device).float()\n\n optim.zero_grad()\n output, _ = model(x, questions) \n loss = criterion(output, label)\n loss.backward()\n optim.step()\n train_loss.append(loss.item())\n \n output = output[:, -1]\n label = label[:, -1] \n pred = (output >= 0.5).long()\n\n num_corrects += (pred == label).sum().item()\n num_total += len(label)\n\n labels.extend(label.view(-1).data.cpu().numpy())\n outs.extend(output.view(-1).data.cpu().numpy())\n\n tbar.set_description('loss - {:.4f}'.format(loss))\n\n\n acc = num_corrects / num_total\n auc = roc_auc_score(labels, outs)\n loss = np.mean(train_loss)\n\n return loss, acc, auc\n\n\ndef validation(model, val_iterator, criterion, device):\n model.eval()\n\n val_loss = []\n num_corrects = 0\n num_total = 0\n labels = []\n outs = []\n\n tbar = tqdm(val_iterator)\n for item in tbar:\n x = item[0].to(device).long()\n questions = item[1].to(device).long()\n label = item[2].to(device).float()\n\n with torch.no_grad():\n output, _ = model(x, questions)\n loss = criterion(output, label)\n val_loss.append(loss.item())\n\n output = output[:, -1]\n label = label[:, -1] \n pred = (output >= 0.5).long()\n num_corrects += (pred == label).sum().item()\n num_total += len(label)\n\n labels.extend(label.view(-1).data.cpu().numpy())\n outs.extend(output.view(-1).data.cpu().numpy())\n\n tbar.set_description('loss - {:.4f}'.format(loss))\n\n acc = num_corrects / num_total\n auc = roc_auc_score(labels, outs)\n loss = np.mean(val_loss)\n\n return loss, acc, auc\n\n\nif __name__ == \"__main__\":\n path = os.path.join(args.root, args.dataset)\n\n if args.dataset == \"riid\":\n n_skill = 13523\n elif args.dataset == \"assistments\":\n n_skill = 124\n else:\n raise KeyError(\"dataset not find.\")\n\n train_dataset = SAKTDataset(path+\"/train.csv\", max_seq=100, n_skill=n_skill)\n val_dataset = SAKTDataset(path+\"/val.csv\", max_seq=100, n_skill=n_skill)\n\n train_dataloader = DataLoader(train_dataset, batch_size=args.batch_size, shuffle=True, num_workers=8)\n \n val_dataloader = DataLoader(val_dataset, batch_size=args.batch_size, shuffle=True, num_workers=8)\n \n device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n\n model = SAKTModel(n_skill, embed_dim=128)\n # optimizer = torch.optim.SGD(model.parameters(), lr=1e-3, momentum=0.99, weight_decay=0.005)\n optimizer = torch.optim.Adam(model.parameters())\n criterion = nn.BCELoss()\n\n model.to(device)\n criterion.to(device)\n\n epochs = args.epochs\n for epoch in range(epochs):\n loss, acc, auc = train(model, train_dataloader, optimizer, criterion, device)\n print(\"epoch - {} train_loss - {:.2f} acc - {:.3f} auc - {:.4f}\".format(epoch, loss, acc, auc))\n\n val_loss, val_acc, val_auc = validation(model, val_dataloader, criterion, device)\n print(\"epoch - {} vall_loss - {:.2f} acc - {:.3f} auc - {:.4f}\".format(epoch, val_loss, val_acc, val_auc))\n\n\n\n" }, { "alpha_fraction": 0.8684210777282715, "alphanum_fraction": 0.8684210777282715, "avg_line_length": 38, "blob_id": "f0eb20f091f6a55fa9a1d4b2ac48ddac6d5974f9", "content_id": "bf153bd1a4e2bd63e5097f78c6eeca17a0edaf59", 
"detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 38, "license_type": "no_license", "max_line_length": 38, "num_lines": 1, "path": "/torchkt/__init__.py", "repo_name": "wsg011/kt", "src_encoding": "UTF-8", "text": "from torchkt.model.dkt import DKTModel" }, { "alpha_fraction": 0.4653266370296478, "alphanum_fraction": 0.47587940096855164, "avg_line_length": 25.546667098999023, "blob_id": "d7cf91bad2c05964e90079b94bad64326881642a", "content_id": "bf6f2ba8429007bf6ec1cf5c024b0642fb6f737f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1990, "license_type": "no_license", "max_line_length": 75, "num_lines": 75, "path": "/examples/sakt/dataset.py", "repo_name": "wsg011/kt", "src_encoding": "UTF-8", "text": "import itertools\nimport numpy as np\nimport pandas as pd\n\nimport torch\nfrom torch.utils.data import Dataset\n\n\nclass SAKTDataset(Dataset):\n def __init__(self, fn, n_skill, max_seq=100):\n super(SAKTDataset, self).__init__()\n self.n_skill = n_skill\n self.max_seq = max_seq\n\n self.user_ids = []\n self.samples = []\n with open(fn, \"r\") as csv_f:\n for student_id, q, qa in itertools.zip_longest(*[csv_f] * 3):\n student_id = int(student_id.strip())\n q = [int(x) for x in q.strip().split(\",\") if x]\n qa = [int(x) for x in qa.strip().split(\",\") if x]\n\n assert len(q) == len(qa)\n\n if len(q) <= 2:\n continue\n\n self.user_ids.append(student_id)\n self.samples.append((q, qa))\n\n def __len__(self):\n return len(self.user_ids)\n\n def __getitem__(self, index):\n \"\"\"\n get sakt model input\n args:\n index\n returns:\n x:\n question_id:\n target_id:\n correct\n \"\"\"\n user_id = self.user_ids[index]\n q_, qa_ = self.samples[index]\n seq_len = len(q_)\n\n q = np.zeros(self.max_seq, dtype=int)\n qa = np.zeros(self.max_seq, dtype=int)\n if seq_len >= self.max_seq:\n q[:] = q_[-self.max_seq:]\n qa[:] = qa_[-self.max_seq:]\n else:\n q[-seq_len:] = q_\n qa[-seq_len:] = qa_\n\n # target_id = np.array([q[-1]])\n # label = np.array([qa[-1]])\n questions = q[1:].copy()\n correctness = qa[1:]\n \n x = q[:-1].copy()\n x += (qa[:-1] == 1) * self.n_skill\n\n return x, questions, correctness\n \nif __name__ == \"__main__\":\n dataset = SAKTDataset(\"../data/ASSISTments2009/train.csv\", n_skill=124)\n\n x, q, target_id, label = dataset.__getitem__(10)\n print(x)\n print(q)\n print(target_id)\n print(label)" }, { "alpha_fraction": 0.8399999737739563, "alphanum_fraction": 0.8399999737739563, "avg_line_length": 25, "blob_id": "3759ca269cc7f28a8c45400ef72a3cd89c2859d5", "content_id": "429be49e3c11a7556f11bb5959d0250767f45cb7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 25, "license_type": "no_license", "max_line_length": 25, "num_lines": 1, "path": "/torchkt/model/__init__.py", "repo_name": "wsg011/kt", "src_encoding": "UTF-8", "text": "from .dkt import DKTModel" } ]
10
dschoorisse/videopaal
https://github.com/dschoorisse/videopaal
b52f7045e692df6fe44ed68f05f52be663993798
118e0d888176feda90647243610a04e567901e9b
78a2265a5278d592f7ec57214333597dfc28c6ce
refs/heads/master
2018-04-12T12:47:44.672538
2017-06-13T19:43:54
2017-06-13T19:43:54
90,190,418
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5852372646331787, "alphanum_fraction": 0.6322495341300964, "avg_line_length": 27.81012725830078, "blob_id": "f8ad19689ea2ac87ca84376512f09faec7da0bc9", "content_id": "1812c242c5abc935afafa1f0ff301ecd8fbd36cd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4552, "license_type": "no_license", "max_line_length": 103, "num_lines": 158, "path": "/videopaal/settings.py", "repo_name": "dschoorisse/videopaal", "src_encoding": "UTF-8", "text": "\"\"\"\nDjango settings for videopaal project.\n\nGenerated by 'django-admin startproject' using Django 1.10.6.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/1.10/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djangoproject.com/en/1.10/ref/settings/\n\"\"\"\n\nimport os\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n\n\n# Quick-start development settings - unsuitable for production\n# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = 'v)x3$n+y%tw7#wc+k-*268=n56t-l3$*+8^7&t%7hzju-la45f'\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = True\n\nALLOWED_HOSTS = []\n\n\n# Application definition\n\nINSTALLED_APPS = [\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n 'videopaal_mgmt.apps.VideopaalMgmtConfig',\n 'anymail'\n]\n\nMIDDLEWARE = [\n 'django.middleware.security.SecurityMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n]\n\nROOT_URLCONF = 'videopaal.urls'\n\nTEMPLATES = [\n {\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'DIRS': ['./templates',\n os.path.join(BASE_DIR, 'templates')]\n ,\n 'APP_DIRS': True,\n 'OPTIONS': {\n 'context_processors': [\n 'django.template.context_processors.debug',\n 'django.template.context_processors.request',\n 'django.contrib.auth.context_processors.auth',\n 'django.contrib.messages.context_processors.messages',\n ],\n },\n },\n]\n\nWSGI_APPLICATION = 'videopaal.wsgi.application'\n\n\n# Database\n# https://docs.djangoproject.com/en/1.10/ref/settings/#databases\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.sqlite3',\n 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),\n }\n}\n\n\n# Password validation\n# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators\n\nAUTH_PASSWORD_VALIDATORS = [\n {\n 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',\n },\n]\n\n\n# Internationalization\n# https://docs.djangoproject.com/en/1.10/topics/i18n/\n\nLANGUAGE_CODE = 'NL'\nTIME_ZONE = 'UTC'\nUSE_I18N = True\nUSE_L10N = True\nUSE_TZ = True\n\nTIME_INPUT_FORMATS = [\n '%H:%M',\n '%H:%M:%S',\n 
'%H:%M:%S.%f'\n]\n\n# These are date+time formats, so they belong to DATETIME_INPUT_FORMATS\n# (the TIME_INPUT_FORMATS list above already covers time-only input).\nDATETIME_INPUT_FORMATS = (\n    '%Y-%m-%dT%H:%M',  # '2006-10-25T14:30'\n    '%Y-%m-%d %H:%M:%S',  # '2006-10-25 14:30:59'\n    '%Y-%m-%d %H:%M:%S.%f',  # '2006-10-25 14:30:59.000200'\n    '%Y-%m-%d %H:%M',  # '2006-10-25 14:30'\n    '%Y-%m-%d',  # '2006-10-25'\n    '%m/%d/%Y %H:%M:%S',  # '10/25/2006 14:30:59'\n    '%m/%d/%Y %H:%M:%S.%f',  # '10/25/2006 14:30:59.000200'\n    '%m/%d/%Y %H:%M',  # '10/25/2006 14:30'\n    '%m/%d/%Y',  # '10/25/2006'\n    '%m/%d/%y %H:%M:%S',  # '10/25/06 14:30:59'\n    '%m/%d/%y %H:%M:%S.%f',  # '10/25/06 14:30:59.000200'\n    '%m/%d/%y %H:%M',  # '10/25/06 14:30'\n    '%m/%d/%y',  # '10/25/06'\n)\n\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/1.10/howto/static-files/\n\nSTATIC_URL = '/static/'\n\n# Email settings\n\nANYMAIL = {\n    # (exact settings here depend on your ESP...)\n    \"MAILGUN_API_KEY\": os.environ.get(\"MAILGUN_API_KEY\"),\n    \"MAILGUN_SENDER_DOMAIN\": os.environ.get(\"MAILGUN_SENDER_DOMAIN\"),  # your Mailgun domain, if needed\n}\nEMAIL_BACKEND = \"anymail.backends.mailgun.EmailBackend\"  # or sendgrid.EmailBackend, or...\nDEFAULT_FROM_EMAIL = os.environ.get(\"DEFAULT_FROM_EMAIL\")  # if you don't already have this in settings\n\n\nLOGIN_REDIRECT_URL = '/videopaal'\n\nALLOWED_HOSTS = ['192.168.1.32', 'localhost', '127.0.0.1']\nDATA_UPLOAD_MAX_MEMORY_SIZE = None  # FIXME: set reasonable limit\n" }, { "alpha_fraction": 0.6220532059669495, "alphanum_fraction": 0.6357414722442627, "avg_line_length": 61.619049072265625, "blob_id": "4b09e43acf76466f51a2fa18e3636d0031fae64b", "content_id": "3a35c3d007c049904d93535ffbc62d6554549e3b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1315, "license_type": "no_license", "max_line_length": 120, "num_lines": 21, "path": "/videopaal_mgmt/urls.py", "repo_name": "dschoorisse/videopaal", "src_encoding": "UTF-8", "text": "from django.conf.urls import url\nfrom . 
import views\n\nurlpatterns = [\n url(r'^$', views.BookingList.as_view(), name='booking-list'),\n url(r'^booking/(?P<pk>[0-9a-f-]+)/edit/$', views.BookingUpdate.as_view(), name='booking-edit'),\n url(r'^booking/(?P<pk>[0-9a-f-]+)/customer/$', views.BookingUpdateCustomer.as_view(), name='booking-edit-customer'),\n url(r'^booking/(?P<booking_uid>[0-9a-f-]+)/canvas/$', views.canvas, name='booking-canvas'),\n url(r'^booking/(?P<booking_uid>[0-9a-f-]+)/png/$', views.png, name='booking-png'),\n url(r'^booking/(?P<pk>[0-9a-f-]+)/view/$', views.BookingDetail.as_view(), name='booking-view'),\n url(r'^booking/(?P<pk>[0-9a-f-]+)/delete/$', views.BookingDelete.as_view(), name='booking-delete'),\n url(r'^booking/create/$', views.BookingCreate.as_view(), name='booking-create'),\n url(r'^booking/(?P<booking_uid>[0-9a-f-]+)/package/$', views.package, name='booking-package'),\n url(r'^video/create/booking/(?P<booking_uid>[0-9a-f-]+)/$', views.VideoCreate.as_view(), name='video-create'),\n url(r'^video/(?P<pk>[0-9a-f-]+)/delete/$', views.VideoDelete.as_view(), name='video-delete'),\n url(r'^booking/$', views.BookingList.as_view(), name='booking-list'),\n\n # API\n url(r'^api/booking/$', views.BookingListJSON.as_view(), name='api-booking-list'),\n\n]\n" }, { "alpha_fraction": 0.5555555820465088, "alphanum_fraction": 0.6004273295402527, "avg_line_length": 22.399999618530273, "blob_id": "0dd019505974d73abcc9df4857e0ce0e2c384362", "content_id": "cd312ab49a4b5ac64e2139310a1cf67c252723e1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 468, "license_type": "no_license", "max_line_length": 58, "num_lines": 20, "path": "/videopaal_mgmt/migrations/0010_booking_svg_data.py", "repo_name": "dschoorisse/videopaal", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n# Generated by Django 1.11.1 on 2017-06-08 17:20\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('videopaal_mgmt', '0009_booking_canvas_data'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='booking',\n name='svg_data',\n field=models.TextField(blank=True, null=True),\n ),\n ]\n" }, { "alpha_fraction": 0.6250885725021362, "alphanum_fraction": 0.6306570768356323, "avg_line_length": 34.528778076171875, "blob_id": "2e5f0cede99e29f66a8b824f765e0a34ac8826f5", "content_id": "888d62cc1af16e86180267261e9eed891a334172", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9877, "license_type": "no_license", "max_line_length": 115, "num_lines": 278, "path": "/videopaal_mgmt/models.py", "repo_name": "dschoorisse/videopaal", "src_encoding": "UTF-8", "text": "from django.db import models\nfrom django.urls import reverse\nimport uuid\n\n# Signal on change\nfrom django.dispatch import receiver\nfrom django.db.models.signals import pre_save, post_save, post_delete\n\n# Email\nfrom django.core.mail import send_mail, EmailMessage\nfrom django.template.loader import render_to_string, get_template\n\n# Zip and upload\nimport zipfile\nimport os\nimport paramiko\n\n# SMS\nfrom .sms import CmSms\nsms = CmSms(os.environ.get(\"CM_APIKEY\"))\n\n# Create your models here.\nclass Booking (models.Model):\n PAID = 'PA'\n UNPAID = 'UN'\n PAYMENT_CHOICES = (\n (PAID, 'Paid'),\n (UNPAID, 'Unpaid'),\n )\n\n NOT_READY = 'NR'\n ENCODING = 'EN'\n PACKAGING = 'PK'\n ARCHIVE_UPLOADING = 'AU'\n READY = 'RD'\n VIDEO_STATUS_CHOICES = (\n (NOT_READY, 'Not ready'),\n 
(ENCODING, 'Encoding'),\n (PACKAGING, 'Packaging'),\n (ARCHIVE_UPLOADING, 'Uploading archive to server'),\n (READY, 'Ready'),\n )\n\n NOT_SET = 'NS'\n BREDA = 'BR'\n HULST = 'HU'\n BUSINESS_LOCATION = (\n (NOT_SET, 'Unknown'),\n (BREDA, 'Breda'),\n (HULST, 'Hulst'),\n )\n\n id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)\n quotationnr = models.CharField(max_length=20, null=True, blank=True)\n event_date = models.DateField()\n videos_in_archive = models.NullBooleanField()\n delivery = models.NullBooleanField()\n delivery_location = models.CharField(max_length=100, null=True, blank=True)\n delivery_ready_before = models.TimeField(null=True, blank=True)\n delivered = models.DateTimeField(null=True, blank=True)\n customer_first_name = models.CharField(max_length=100)\n customer_last_name = models.CharField(max_length=100)\n customer_email = models.EmailField()\n customer_mobile = models.CharField(max_length=15)\n customer_address = models.CharField(max_length=100)\n payment_status = models.CharField(\n max_length=2,\n choices=PAYMENT_CHOICES,\n default=UNPAID,\n )\n video_status = models.CharField(\n max_length=2,\n choices=VIDEO_STATUS_CHOICES,\n default=NOT_READY,\n )\n business_location = models.CharField(\n max_length=2,\n choices=BUSINESS_LOCATION,\n default=NOT_SET,\n )\n link = models.URLField(null=True, blank=True)\n last_time_updated_by_customer = models.DateTimeField(null=True, blank=True)\n local_package = models.FileField(null=True, blank=True)\n canvas_data = models.TextField(null=True, blank=True)\n svg_data = models.TextField(null=True, blank=True)\n\n def __str__(self):\n return str(self.event_date) + \", \" + self.customer_last_name\n\n def as_dict(self):\n return {\n \"id\": str(self.id),\n \"event_date\": str(self.event_date),\n \"customer_first_name\": str(self.customer_first_name),\n \"customer_last_name\": str(self.customer_last_name)\n }\n\n def get_absolute_url(self):\n return reverse('booking-view', kwargs={'pk': self.pk})\n\n def get_video_filenames(self):\n videos = Video.objects.filter(booking=self.pk)\n files = []\n for video in videos:\n files.append(video.file)\n return files\n\n def create_zip(self):\n files = self.get_video_filenames()\n\n # Use GUID as filename for archive\n filename = \"archives/%s.zip\" % self.pk\n\n # The zip compressor\n zf = zipfile.ZipFile(filename, \"w\")\n\n for file in files:\n # Calculate path for file in zip\n # fdir, fname = os.path.split(fpath)\n # zip_path = os.path.join(zip_subdir, fname)\n\n # Add file, at correct path\n print(\"Zipping %s\" % file.url)\n zf.write(file.name, \"videopaal/\" + os.path.basename(file.name))\n\n # Must close zip for all contents to be written\n zf.close()\n\n # Save path of created zip file to booking info\n self.local_package.name = filename\n self.save(update_fields=['local_package'])\n\n def upload_zip(self):\n host = os.environ.get(\"UPLOAD_SSH_HOST\")\n port = int(os.environ.get(\"UPLOAD_SSH_PORT\"))\n transport = paramiko.Transport((host, port))\n transport.connect(username=os.environ.get(\"UPLOAD_SSH_USER\"), password=os.environ.get(\"UPLOAD_SSH_PASSWORD\"))\n\n sftp = paramiko.SFTPClient.from_transport(transport)\n\n # Construct remote path from configured upload path, appended with local filename (stripped of directories)\n filename = os.path.basename(self.local_package.name)\n path = os.environ.get(\"UPLOAD_PATH\") % filename\n\n self.video_status = 'AU' #TODO: improve status system\n self.save(update_fields=['video_status'])\n\n 
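# paramiko's SFTPClient.put is a blocking call, so the 'RD' status below is only set once the upload has completed.\n        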
sftp.put(self.local_package.name, path)\n        sftp.close()\n        transport.close()\n\n        # FIXME::: save http target to where the file was uploaded <now generated in view>\n        self.link = os.environ.get(\"HTTP_BASE_PATH\") + filename\n        self.save(update_fields=['link'])\n\n        self.video_status = 'RD'\n        self.save(update_fields=['video_status'])\n\n    def package(self):\n        from .tasks import package_booking_task\n\n        print(\"---> Starting packaging and upload job\")\n        package_booking_task.delay(booking_id=self.pk)\n\n\n# Register for changes in booking delivery status\n# http://stackoverflow.com/questions/1160019/django-send-email-on-model-change\n@receiver(post_save, sender=Booking, dispatch_uid='delivered')\ndef booking_changed(sender, instance, created, update_fields, **kwargs):\n    print('Booking changed')\n    print(instance)\n    print(update_fields)\n\n    if created:\n        subject = \"Booking request received\"\n        to = [instance.customer_email]\n\n        ctx = {\n            'customer_first_name': instance.customer_first_name,\n            'customer_last_name': instance.customer_last_name,\n            'event_date': instance.event_date,\n            'customer_mobile': instance.customer_mobile,\n            'delivery': instance.delivery,\n            'delivery_ready_before': instance.delivery_ready_before,\n            'customer_link': \"http://127.0.0.1:8000\" + reverse('booking-edit-customer', kwargs={'pk': instance.pk})\n            # FIXME: create external link variable and use it to create link\n        }\n\n        message = render_to_string('videopaal_mgmt/email/created.txt', ctx)\n        EmailMessage(subject, message, to=to).send()\n\n        sms.send(instance.customer_mobile, \"Beste %s, je boeking voor deVideopaal is vastgelegd. Bij afleveren \"\n                 \"sturen wij een SMS naar dit nummer. Bedankt voor je boeking!\" % instance.customer_first_name)\n\n    elif update_fields:\n        # Check if the 'delivered' time field was specifically updated\n        if 'delivered' in update_fields:\n            print(\"---> A video booth is delivered\")\n\n            # Send e-mail\n            subject = \"Video booth delivered\"\n            to = [instance.customer_email]\n            from_email = os.environ.get(\"FROM_EMAIL\")\n\n            ctx = {\n                'customer_first_name': instance.customer_first_name,\n                'customer_last_name': instance.customer_last_name,\n                'customer_mobile': instance.customer_mobile,\n            }\n\n            message = render_to_string('videopaal_mgmt/email/delivered.txt', ctx)\n            EmailMessage(subject, message, to=to, from_email=from_email).send()\n\n            # Send SMS\n            #sms.send(instance.customer_mobile, \"Het videogastenboek staat gereed op locatie.\")\n\n        if 'link' in update_fields:\n            print(\"---> Video files are ready\")\n\n            # Send e-mail\n            subject = \"Your video files are ready\"\n            to = [instance.customer_email]\n            from_email = os.environ.get(\"FROM_EMAIL\")\n\n            ctx = {\n                'customer_first_name': instance.customer_first_name,\n                'customer_last_name': instance.customer_last_name,\n                'link': instance.link,\n            }\n\n            message = render_to_string('videopaal_mgmt/email/video_files_ready.txt', ctx)\n            EmailMessage(subject, message, to=to, from_email=from_email).send()\n\n            # Send SMS\n            sms.send(instance.customer_mobile, \"Uw videobestanden zijn verwerkt en staan klaar als download. \"\n                     \"U ontvangt een e-mail van ons met de downloadlink.\")\n\n\nclass Video(models.Model):\n    QUEUED = 'QD'  # FIXME: when videos are uploaded they should receive this status\n    ENCODING = 'EN'\n    READY = 'RD'\n    VIDEO_STATUS_CHOICES = (\n        (QUEUED, 'Queued'),\n        (ENCODING, 'Encoding'),\n        (READY, 'Ready'),\n    )\n\n    booking = models.ForeignKey(Booking)\n    id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)\n    file = models.FileField(upload_to='uploads')\n    ready = models.BooleanField(default=False)\n    status = models.CharField(\n        max_length=2,\n        choices=VIDEO_STATUS_CHOICES,\n        default=QUEUED,\n    )\n\n\n@receiver(post_save, sender=Video, dispatch_uid='file')\ndef new_video(sender, instance, created, *args, **kwargs):\n    # Import here to avoid a circular import between tasks and models\n    # http://stackoverflow.com/questions/26379026/resolving-circular-imports-in-celery-and-django\n    # http://stackoverflow.com/questions/17313532/django-import-loop-between-celery-tasks-and-my-models\n    from .tasks import encode_video\n\n    if created:\n        print(\"---> New video created\")\n        print(instance.pk)\n        encode_video.delay(video_id=instance.pk)\n        print(\"---> video sent to encoder\")\n    else:\n        print(\"---> Video updated (no new video)\")\n\n\n@receiver(post_delete, sender=Video)\ndef video_deleted(sender, instance, **kwargs):\n    # When a video record gets deleted, delete the corresponding video file on the disk\n    instance.file.delete(False)  # Pass False so the FileField doesn't save the model\n" }, { "alpha_fraction": 0.37721189856529236, "alphanum_fraction": 0.37807509303092957, "avg_line_length": 34.64615249633789, "blob_id": "1ec0275486060a8e06abba1b8a605f81500c8800", "content_id": "eafe205cafa8fb0b8b731dc0cd6afa90c99d65eb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "HTML", "length_bytes": 2317, "license_type": "no_license", "max_line_length": 118, "num_lines": 65, "path": "/videopaal_mgmt/templates/videopaal_mgmt/booking_list.html", "repo_name": "dschoorisse/videopaal", "src_encoding": "UTF-8", "text": "{% extends \"base.html\" %}\n{% block title %}Booking form{% endblock %}\n\n{% block content %}\n    <h1>All bookings</h1>\n\n    <a class=\"btn btn-primary\" href=\"{% url 'booking-create' %}\" role=\"button\">\n        Add booking\n    </a>\n\n    <table class=\"table\">\n    <thead>\n        <tr>\n        <th>Event date</th>\n        <th>Customer name</th>\n        <th>Delivery</th>\n        <th>Deliver before</th>\n        <th>Location</th>\n        <th>Status</th>\n        <th>Delete</th>\n        </tr>\n    </thead>\n    <tbody>\n\n    {% for booking in object_list %}\n        <tr>\n        <td> <a href=\"{% url 'booking-view' booking.pk %}\"> {{ booking.event_date|date:\"d-m-Y\" }} </a> </td>\n        <td> {{ booking.customer_first_name }} {{ booking.customer_last_name }} </td>\n        <td>\n            {% if booking.delivery == True %}\n                <span class=\"badge badge-success\">Delivery</span>\n            {% elif booking.delivery == False %}\n                <span class=\"badge badge-primary\">Pick-up</span>\n            {% elif booking.delivery == None %}\n                <span class=\"badge badge-default\">Unknown</span>\n            {% else %}\n                <span class=\"badge badge-danger\">Error retrieving status</span>\n            {% endif %}\n        </td>\n        <td>\n            {{ booking.delivery_ready_before|default_if_none:\"\" }}\n        </td>\n        <td>\n            {{ booking.delivery_location|default_if_none:\"\" }}\n        </td>\n        <td>\n            {{ booking.video_status|default_if_none:\"error\" }}\n        </td>\n        <td>\n            <a class=\"btn btn-danger btn-sm\" href=\"{% url 'booking-delete' booking.pk %}\">Delete</a>\n        </td>\n        </tr>\n\n    {% empty %}\n        <tr>\n        <td colspan=\"7\">No bookings yet.
</td>\n        </tr>\n    {% endfor %}\n\n    </tbody>\n    </table>\n\n{% endblock %}\n" }, { "alpha_fraction": 0.5196998119354248, "alphanum_fraction": 0.5834896564483643, "avg_line_length": 25.649999618530273, "blob_id": "a8bde9dd7f21f76208fc4dede71b2497c9f23f74", "content_id": "a635b37b618a5e119d3f42f432975e6d5977b59d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 533, "license_type": "no_license", "max_line_length": 128, "num_lines": 20, "path": "/videopaal_mgmt/migrations/0008_video_status.py", "repo_name": "dschoorisse/videopaal", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n# Generated by Django 1.11.1 on 2017-05-09 20:37\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n    dependencies = [\n        ('videopaal_mgmt', '0007_auto_20170509_2033'),\n    ]\n\n    operations = [\n        migrations.AddField(\n            model_name='video',\n            name='status',\n            field=models.CharField(choices=[('QD', 'Queued'), ('EN', 'Encoding'), ('RD', 'Ready')], default='QD', max_length=2),\n        ),\n    ]\n" }, { "alpha_fraction": 0.6073512434959412, "alphanum_fraction": 0.6131071448326111, "avg_line_length": 33.76404571533203, "blob_id": "4ac9a201036ceeeef330b33d04b5373ef4e50b45", "content_id": "75c14b6e198b5a75bb99ea10fe488043daf15a0c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3006, "license_type": "no_license", "max_line_length": 100, "num_lines": 89, "path": "/videopaal_mgmt/tasks.py", "repo_name": "dschoorisse/videopaal", "src_encoding": "UTF-8", "text": "# Create your tasks here\nfrom celery import shared_task\nfrom .models import Booking, Video\nimport os\nimport subprocess\nimport traceback\n\n\n@shared_task\ndef encode_video(video_id):\n    video = Video.objects.get(id=video_id)\n    try:\n        filename = os.path.basename(video.file.name)\n        filename_without_ext = os.path.splitext(filename)[0]\n        output_file_name = 'devideopaal-{}.mp4'.format(filename_without_ext)\n        output_path = os.path.join(\"uploads\", output_file_name)\n\n        # Update video status to ENCODING status\n        print('Set encoding status')\n        video.status = 'EN'\n        video.save(update_fields=['status'])\n\n        # Add check, only update to ready if process ran successfully\n        subprocess.call([\n            'ffmpeg',\n            '-i', video.file.name,\n            '-i', 'videopaal_mgmt/watermerk.png',\n            '-filter_complex', 'hflip,vflip, overlay=main_w-overlay_w-10:main_h-overlay_h-10',\n            '-codec:v', 'libx264',\n            '-crf', '18',\n            '-preset', 'slow',\n            '-pix_fmt', 'yuv420p',\n            output_path])\n\n        # Delete old file #TODO: do we really want this, just throw away the original?\n        video.file.delete()\n\n        # Set newly encoded file\n        video.file.name = output_path\n\n        video.ready = True\n        video.save(update_fields=['ready'])\n\n        # Update video status to READY status\n        video.status = 'RD'\n        video.save(update_fields=['status', 'file'])\n\n        # TODO: remove ready attribute\n        # TODO: check if encoding failed. If it failed, set a (not yet existing) FAILED status\n\n    except Exception:\n        print(traceback.format_exc())\n\n\n@shared_task()\ndef package_booking_task(booking_id):\n    booking = Booking.objects.get(id=booking_id)\n    print(\"Starting packaging of booking: %s\" % booking)\n\n    videos = Video.objects.filter(booking=booking.pk)\n\n    # Search for videos that are not ready yet (should not be the case) and try again to encode them\n    # TODO: rewrite, use status field instead of ready field\n    for video in videos:\n        if not video.ready:\n            #raise Exception(\"Some videos not ready\")\n            print(\"File %s not yet ready, trying to encode\" % video.file.name)\n            if os.path.isfile(video.file.path):  # Check if video really exists on filesystem\n                encode_video(video.pk)\n            else:\n                print(\"%s does not exist, deleting node\" % video.file.name)\n                video.delete()\n\n    # Check if all files are in place\n    for video in videos:\n        if video.ready:\n            if not os.path.isfile(video.file.path):  # Check for videos missing on filesystem\n                print(\"%s does not exist, deleting node\" % video.file.name)\n                video.delete()\n\n    #booking.video_status = 'PK'\n    #booking.save(update_fields=['video_status'])\n    # TODO: commit change\n\n    booking.create_zip()\n    booking.upload_zip()\n\n" }, { "alpha_fraction": 0.725806474685669, "alphanum_fraction": 0.7822580933570862, "avg_line_length": 23.600000381469727, "blob_id": "7e8ff59aa3d3daaf6358aa345db4a92ac974a45b", "content_id": "769535eb98f35cf7dabdddb086f9d5034a59ba54", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 124, "license_type": "no_license", "max_line_length": 50, "num_lines": 5, "path": "/videopaal_mgmt/static/main/tmp/readme.txt", "repo_name": "dschoorisse/videopaal", "src_encoding": "UTF-8", "text": "For now all images must be exactly 768x1024 px.\n\nScaling in the video booth app is not yet implemented.\n\nTODO: implement scaling\n" }, { "alpha_fraction": 0.5363036394119263, "alphanum_fraction": 0.5924092531204224, "avg_line_length": 29.299999237060547, "blob_id": "727d5161b13c163c1e05e180d853683a9960233b", "content_id": "e2204dd327744d26a54d96545672e68746594e5e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 606, "license_type": "no_license", "max_line_length": 191, "num_lines": 20, "path": "/videopaal_mgmt/migrations/0007_auto_20170509_2033.py", "repo_name": "dschoorisse/videopaal", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n# Generated by Django 1.11.1 on 2017-05-09 20:33\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n    dependencies = [\n        ('videopaal_mgmt', '0006_auto_20170509_2029'),\n    ]\n\n    operations = [\n        migrations.AlterField(\n            model_name='booking',\n            name='video_status',\n            field=models.CharField(choices=[('NR', 'Not ready'), ('EN', 'Encoding'), ('PK', 'Packaging'), ('AU', 'Uploading archive to server'), ('RD', 'Ready')], default='NR', max_length=2),\n        ),\n    ]\n" }, { "alpha_fraction": 0.6073512434959412, "alphanum_fraction": 0.6073512434959412, "avg_line_length": 35.46808624267578, "blob_id": "79b14f4452d5bdf9736ccf8bfab0fe6a7c1595da", "content_id": "2c2d662af73168e2cdd53814c3cf44244acde0dc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1714, "license_type": "no_license", "max_line_length": 114, "num_lines": 47, "path": "/videopaal_mgmt/forms.py", "repo_name": "dschoorisse/videopaal", 
"src_encoding": "UTF-8", "text": "from django import forms\nfrom .models import Booking, Video\nfrom django.urls import reverse\n\n\nclass BookingCreateUpdateForm(forms.ModelForm):\n\n delivery_ready_before = forms.TimeField(widget=forms.TimeInput(\n format='%H:%M', attrs={'type': 'time', 'class': 'form-control'}))\n\n class Meta:\n model = Booking\n fields = ['event_date', 'customer_first_name', 'customer_last_name', 'customer_address', 'customer_email',\n 'customer_mobile', 'delivery', 'delivery_location', 'delivery_ready_before']\n\n widgets = {\n 'customer_first_name': forms.TextInput(attrs={'class': 'form-control'}),\n 'customer_last_name': forms.TextInput(attrs={'class': 'form-control'}),\n 'customer_address': forms.TextInput(attrs={'class': 'form-control'}),\n 'customer_email': forms.TextInput(attrs={'class': 'form-control'}),\n 'customer_mobile': forms.TextInput(attrs={'class': \"form-control\"}),\n 'delivery_location': forms.TextInput(attrs={'class': 'form-control'}),\n 'event_date': forms.DateInput(\n format=\"%Y-%m-%d\", attrs={'class':'form-control', 'type': 'date'})\n }\n\n def get_absolute_url(self):\n return reverse('booking-detail', kwargs={'pk': self.pk})\n\n\nclass BookingUpdateCustomerForm(forms.ModelForm):\n\n class Meta:\n model = Booking\n fields = ['svg_data', 'canvas_data']\n\n def get_absolute_url(self):\n return reverse('booking-detail', kwargs={'pk': self.pk})\n\n\nclass VideoCreateForm(forms.ModelForm):\n class Meta:\n model = Video\n fields = ['file', 'booking']\n widgets = {\n 'booking': forms.HiddenInput(),\n }\n" }, { "alpha_fraction": 0.5139116048812866, "alphanum_fraction": 0.5482814908027649, "avg_line_length": 21.629629135131836, "blob_id": "0b06173be86b2fe3107f820a826b45f964e93439", "content_id": "65aa13ab54a21c048d6b5d25f701442124636527", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 611, "license_type": "no_license", "max_line_length": 52, "num_lines": 27, "path": "/videopaal_mgmt/migrations/0011_auto_20170613_1728.py", "repo_name": "dschoorisse/videopaal", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n# Generated by Django 1.11.2 on 2017-06-13 17:28\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('videopaal_mgmt', '0010_booking_svg_data'),\n ]\n\n operations = [\n migrations.RemoveField(\n model_name='booking',\n name='background',\n ),\n migrations.RemoveField(\n model_name='booking',\n name='message',\n ),\n migrations.RemoveField(\n model_name='booking',\n name='title',\n ),\n ]\n" }, { "alpha_fraction": 0.7323818206787109, "alphanum_fraction": 0.7555753588676453, "avg_line_length": 31.97058868408203, "blob_id": "60a0445d90b440396ec4a212ee675199cb95cf79", "content_id": "36792d139b22eff6eddac0178288ea392893d40b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1121, "license_type": "no_license", "max_line_length": 113, "num_lines": 34, "path": "/README.md", "repo_name": "dschoorisse/videopaal", "src_encoding": "UTF-8", "text": "First attempt at writing a Django + Celery powered web application. 
It will support a video booth rental service.\n\n# What this application does\nThis application will eventually do the following:\n- Let an admin/registered user create a booking\n- Using a unique link, let the client fill in some information (message, photo, etc...)\n- Track booking status and delivery status (send SMS to customer on delivery of the video booth)\n- Let an admin/registered user upload raw videos and assign them to a booking\n- Videos will be encoded using ffmpeg by a Celery worker\n- Once complete, a booking will be packaged into a ZIP file\n- The ZIP file is uploaded over SFTP to an external web host.\n\n\n# How to start it\n0. Start RabbitMQ server for the first time\n```bash\n# docker pull rabbitmq\n# docker run -d -p 5672:5672 -p 15672:15672 -e RABBITMQ_NODENAME=my-rabbit --name rabbit-server rabbitmq\n```\n1. Start RabbitMQ server thereafter\n```bash\n# docker start rabbit-server\n```\n\n2. Start Celery worker\n```bash\n# cd ~/PycharmProjects/videopaal\n# celery -A videopaal worker -l info\n```\n\n3. Start (this) Django App\n```bash\n# python manage.py runserver 8000\n```\n" }, { "alpha_fraction": 0.7234567999839783, "alphanum_fraction": 0.7234567999839783, "avg_line_length": 24.3125, "blob_id": "bc10c36e627790dc2fb53e2b4dce128a724a79ca", "content_id": "9c209a32e90ff8a632b85dd60c77f153a31f2264", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 405, "license_type": "no_license", "max_line_length": 60, "num_lines": 16, "path": "/videopaal_mgmt/admin.py", "repo_name": "dschoorisse/videopaal", "src_encoding": "UTF-8", "text": "from django.contrib import admin\n\nfrom .models import Booking, Video\n\n# Register your models here.\nclass BookingAdmin(admin.ModelAdmin):\n    list_display = ('event_date', 'quotationnr', 'delivery')\n\n\nclass VideoAdmin(admin.ModelAdmin):\n    list_display = ('id', 'file', 'booking', 'ready')\n    list_filter = ('booking',)\n\n\nadmin.site.register(Booking, BookingAdmin)\nadmin.site.register(Video, VideoAdmin)\n" }, { "alpha_fraction": 0.4151857793331146, "alphanum_fraction": 0.4281098544597626, "avg_line_length": 27.136363983154297, "blob_id": "a87901c1938230fdb91322a00b6c5699a857b218", "content_id": "ec2149edbf29d89a9b5e98156cfa75ee536780b9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1238, "license_type": "no_license", "max_line_length": 97, "num_lines": 44, "path": "/videopaal_mgmt/sms.py", "repo_name": "dschoorisse/videopaal", "src_encoding": "UTF-8", "text": "import requests\nimport json\n\n\nclass CmSms:\n    ENDPOINT = 'https://gw.cmtelecom.com/v1.0/message'\n    HEADERS = {'content-type': 'application/json'}\n\n    def __init__(self, apikey):\n        self.__apikey = apikey\n\n    def send(self, to, message):\n        if len(message) > 160:\n            raise ValueError('Message too long')\n\n        if len(to) < 10:\n            raise ValueError('Phone number too short')\n        elif len(to) > 13:\n            raise ValueError('Phone number too long')\n\n        if len(to) == 10:\n            to = \"0031\" + to[1:]\n\n        payload = {\n            \"messages\": {\n                \"authentication\": {\n                    \"producttoken\": self.__apikey\n                },\n                \"msg\": [{\n                    \"from\": \"deVideopaal\",\n                    \"to\": [{\n                        \"number\": to\n                    }],\n                    \"body\": {\n                        \"content\": message\n                    }\n                }\n                ]\n            }\n        }\n        print(json.dumps(payload))\n        response = requests.post(CmSms.ENDPOINT, data=json.dumps(payload), headers=CmSms.HEADERS)\n        print(response)\n        return response.status_code\n" }, { "alpha_fraction": 0.6760607957839966, "alphanum_fraction": 0.6803356409072876, "avg_line_length": 31.72538948059082, 
"blob_id": "0fd948b58862fa00957cb006138570e863084331", "content_id": "61b08f95e3520ed562fa847b87751a7517293553", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6316, "license_type": "no_license", "max_line_length": 90, "num_lines": 193, "path": "/videopaal_mgmt/views.py", "repo_name": "dschoorisse/videopaal", "src_encoding": "UTF-8", "text": "from django.shortcuts import get_object_or_404\nfrom django.http import HttpResponse, HttpResponseRedirect, Http404\nfrom django.views import generic\nfrom django.urls import reverse_lazy, reverse\nfrom django.core.mail import send_mail, EmailMessage\nfrom django.template.loader import render_to_string, get_template\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom django.contrib.auth.decorators import login_required\nimport json\nimport os\nimport glob\nimport base64\nfrom django.http import JsonResponse\n\nfrom .models import Booking, Video\nfrom .forms import BookingCreateUpdateForm, VideoCreateForm, BookingUpdateCustomerForm\n\n# Direct copy of Django documentation\nclass JSONResponseMixin(object):\n \"\"\"\n A mixin that can be used to render a JSON response.\n \"\"\"\n def render_to_json_response(self, context, **response_kwargs):\n \"\"\"\n Returns a JSON response, transforming 'context' to make the payload.\n \"\"\"\n return JsonResponse(\n self.get_data(context),\n **response_kwargs\n )\n\n def get_data(self, context):\n \"\"\"\n Returns an object that will be serialized as JSON by json.dumps().\n \"\"\"\n # Note: This is *EXTREMELY* naive; in reality, you'll need\n # to do much more complex handling to ensure that arbitrary\n # objects -- such as Django model instances or querysets\n # -- can be serialized as JSON.\n return context\n\n\n# Create your views here.\ndef index(request):\n return HttpResponse(\"Welcome to the video booth app\")\n\n\ndef success(request):\n return HttpResponse(\"Successfully updated record\")\n\n\nclass BookingList(LoginRequiredMixin, generic.ListView):\n print(\"Regular responder\")\n model = Booking\n\n# TODO: login or authenication required\nclass BookingListJSON(generic.ListView):\n print(\"JSON responder\")\n queryset = Booking.objects.all()\n\n def get(self, request, *args, **kwargs):\n dictionaries = [ obj.as_dict() for obj in self.get_queryset() ]\n return HttpResponse(json.dumps(dictionaries), content_type='application/json')\n\nclass BookingDetail(LoginRequiredMixin, generic.DetailView):\n model = Booking\n fields = [\n 'event_date', 'customer_first_name', 'customer_last_name',\n 'customer_address', 'customer_email', 'customer_mobile',\n 'event_date', 'delivery', 'delivery_location', 'delivery_ready_before'\n ]\n\n def get_context_data(self, **kwargs):\n # Call the base implementation first to get a context\n context = super(BookingDetail, self).get_context_data(**kwargs)\n context['video_list'] = Video.objects.filter(booking=self.object.pk)\n return context\n\n\nclass BookingCreate(LoginRequiredMixin, generic.CreateView):\n model = Booking\n form_class = BookingCreateUpdateForm\n template_name = 'videopaal_mgmt/booking_create.html'\n\n\nclass BookingUpdate(LoginRequiredMixin, generic.UpdateView):\n model = Booking\n form_class = BookingCreateUpdateForm\n template_name = 'videopaal_mgmt/booking_update.html'\n\n\nclass BookingUpdateCustomer(generic.UpdateView):\n model = Booking\n form_class = BookingUpdateCustomerForm\n # TODO: define fields that may be updated by customer in 'fields' variable\n\n\n path = 
\"videopaal_mgmt/static/main/tmp/*.jpg\"\n img_path_list = glob.glob(path)\n img_list = []\n for img in img_path_list:\n img_list.append(\"/static/main/tmp/\" + (os.path.basename(img)))\n\n template_name = 'videopaal_mgmt/booking_update_customer.html'\n\n def get_context_data(self):\n context = super(BookingUpdateCustomer, self).get_context_data()\n context['backgrounds'] = self.img_list\n return context\n\nclass VideoCreate(LoginRequiredMixin, generic.CreateView):\n model = Video\n form_class = VideoCreateForm\n\n template_name = 'videopaal_mgmt/video_create.html'\n success_url = reverse_lazy('booking-list')\n\n def get_initial(self):\n initial = super(VideoCreate, self).get_initial()\n initial['booking'] = self.kwargs['booking_uid']\n return initial\n\n\nclass BookingDelete(LoginRequiredMixin, generic.DeleteView):\n model = Booking\n\n def get_object(self, queryset=None):\n obj = super(BookingDelete, self).get_object()\n return obj\n\n def get_success_url(self):\n return reverse('booking-list')\n\n\nclass VideoDelete(LoginRequiredMixin, generic.DeleteView):\n model = Video\n\n def get_object(self, queryset=None):\n obj = super(VideoDelete, self).get_object()\n return obj\n\n def get_success_url(self):\n print(self.object)\n print('Returning to:')\n print(self.object.booking.pk)\n return reverse('booking-view', kwargs={'pk': self.object.booking.pk})\n\n\ndef canvas(request, booking_uid):\n booking = get_object_or_404(Booking, pk=booking_uid)\n print(booking)\n\n if request.method == 'POST':\n print(\"Setting new canvas for booking %s\" % booking)\n booking.canvas_data = request.body\n booking.save(update_fields=['canvas_data'])\n return HttpResponse(json.dumps({'key': 'value'}), content_type=\"application/json\")\n\n elif request.method == 'GET':\n print(\"Retrieving canvas for booking %s\" % booking)\n canvas = booking.canvas_data\n if canvas:\n return HttpResponse(canvas, content_type=\"application/json\")\n else:\n raise Http404()\n\ndef png(request, booking_uid):\n booking = get_object_or_404(Booking, pk=booking_uid)\n print(booking)\n\n if request.method == 'POST':\n print(\"Setting new PNG for booking %s\" % booking)\n booking.svg_data = request.body\n booking.save(update_fields=['svg_data'])\n return HttpResponse(json.dumps({'key': 'value'}), content_type=\"application/json\")\n\n elif request.method == 'GET':\n print(\"Retrieving PNG for booking %s\" % booking)\n png = booking.svg_data\n if png:\n png = base64.b64decode(png)\n return HttpResponse(png, content_type=\"image/png\")\n else:\n raise Http404()\n\n@login_required\ndef package(request, booking_uid):\n booking = get_object_or_404(Booking, pk=booking_uid)\n\n print(\"Packaging all video files \")\n booking.package()\n\n return HttpResponseRedirect(reverse('booking-view', kwargs={'pk':booking.pk}))\n" }, { "alpha_fraction": 0.5482093691825867, "alphanum_fraction": 0.5785123705863953, "avg_line_length": 29.25, "blob_id": "673638a9737bc44fda1d4b835a6018f94510d1c0", "content_id": "9be3a378ed50ef06fbf4c77e71d991f9af3f841b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 726, "license_type": "no_license", "max_line_length": 191, "num_lines": 24, "path": "/videopaal_mgmt/migrations/0003_auto_20170507_1652.py", "repo_name": "dschoorisse/videopaal", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n# Generated by Django 1.10.6 on 2017-05-07 16:52\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass 
Migration(migrations.Migration):\n\n dependencies = [\n ('videopaal_mgmt', '0002_booking_local_package'),\n ]\n\n operations = [\n migrations.RemoveField(\n model_name='booking',\n name='all_videos_uploaded',\n ),\n migrations.AlterField(\n model_name='booking',\n name='video_status',\n field=models.CharField(choices=[('NR', 'Not ready'), ('EN', 'Encoding'), ('PK', 'Packaging'), ('AU', 'Uploading archive to server'), ('RD', 'Ready')], default='NR', max_length=2),\n ),\n ]\n" }, { "alpha_fraction": 0.5616147518157959, "alphanum_fraction": 0.5803824067115784, "avg_line_length": 51.296295166015625, "blob_id": "59848539fe03f08d0ca7d5453d690918b4ca207d", "content_id": "944c02107dfdaa012dddada8257c678ea9009f5e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2824, "license_type": "no_license", "max_line_length": 148, "num_lines": 54, "path": "/videopaal_mgmt/migrations/0001_initial.py", "repo_name": "dschoorisse/videopaal", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n# Generated by Django 1.10.6 on 2017-04-17 14:55\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\nimport django.db.models.deletion\nimport uuid\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='Booking',\n fields=[\n ('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),\n ('quotationnr', models.CharField(blank=True, max_length=20, null=True)),\n ('title', models.CharField(blank=True, max_length=100, null=True)),\n ('message', models.CharField(blank=True, max_length=500, null=True)),\n ('background', models.CharField(blank=True, max_length=100, null=True)),\n ('videos_in_archive', models.NullBooleanField()),\n ('delivery', models.NullBooleanField()),\n ('delivery_location', models.CharField(blank=True, max_length=100, null=True)),\n ('delivery_ready_before', models.DateTimeField(blank=True, null=True)),\n ('delivered', models.DateTimeField(blank=True, null=True)),\n ('event_date', models.CharField(max_length=100)),\n ('customer_first_name', models.CharField(max_length=100)),\n ('customer_last_name', models.CharField(max_length=100)),\n ('customer_email', models.EmailField(max_length=254)),\n ('customer_mobile', models.CharField(max_length=15)),\n ('customer_address', models.CharField(max_length=100)),\n ('payment_status', models.CharField(choices=[('PA', 'Paid'), ('UN', 'Unpaid')], default='UN', max_length=2)),\n ('video_status', models.CharField(choices=[('NR', 'Not ready'), ('UL', 'Uploading'), ('RD', 'Ready')], default='NR', max_length=2)),\n ('business_location', models.CharField(choices=[('NS', 'Unknown'), ('BR', 'Breda'), ('HU', 'Hulst')], default='NS', max_length=2)),\n ('link', models.URLField(blank=True, null=True)),\n ('last_time_updated_by_customer', models.DateTimeField(blank=True, null=True)),\n ('all_videos_uploaded', models.BooleanField(default=False)),\n ],\n ),\n migrations.CreateModel(\n name='Video',\n fields=[\n ('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),\n ('file', models.FileField(upload_to='uploads')),\n ('ready', models.BooleanField(default=False)),\n ('booking', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='videopaal_mgmt.Booking')),\n ],\n ),\n ]\n" } ]
17
VishalKanakamamidi/Ironman-Face-filter
https://github.com/VishalKanakamamidi/Ironman-Face-filter
b7df705a803d993757400c40784882955a3528d6
efd1558d61eaf54762ea4d63e98d436e54f831cd
a4a79683bf6ac74b8c42768c62bc0c8b4e2aa555
refs/heads/master
2020-05-17T00:21:54.005728
2019-04-25T08:55:56
2019-04-25T08:55:56
183,394,482
2
0
null
null
null
null
null
[ { "alpha_fraction": 0.5172145366668701, "alphanum_fraction": 0.5362769961357117, "avg_line_length": 38.81745910644531, "blob_id": "487fb26faff78f4e54aa3524bd42d1acbc5c37ce", "content_id": "960d2d54130f591b023a8482d9ab927fefd61689", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5141, "license_type": "no_license", "max_line_length": 119, "num_lines": 126, "path": "/ironface.py", "repo_name": "VishalKanakamamidi/Ironman-Face-filter", "src_encoding": "UTF-8", "text": "# Code adapted from Tensorflow Object Detection Framework\r\n# https://github.com/tensorflow/models/blob/master/research/object_detection/object_detection_tutorial.ipynb\r\n# Tensorflow Object Detection Detector\r\n\r\nimport numpy as np\r\nimport tensorflow as tf\r\nimport cv2\r\nimport time\r\nfrom imutils.video import VideoStream\r\nfrom PIL import Image\r\nimport imutils\r\n\r\n\r\nclass DetectorAPI:\r\n def __init__(self, path_to_ckpt):\r\n self.path_to_ckpt = path_to_ckpt\r\n\r\n self.detection_graph = tf.Graph()\r\n with self.detection_graph.as_default():\r\n od_graph_def = tf.GraphDef()\r\n with tf.gfile.GFile(self.path_to_ckpt, 'rb') as fid:\r\n serialized_graph = fid.read()\r\n od_graph_def.ParseFromString(serialized_graph)\r\n tf.import_graph_def(od_graph_def, name='')\r\n\r\n self.default_graph = self.detection_graph.as_default()\r\n self.sess = tf.Session(graph=self.detection_graph)\r\n\r\n # Definite input and output Tensors for detection_graph\r\n self.image_tensor = self.detection_graph.get_tensor_by_name('image_tensor:0')\r\n # Each box represents a part of the image where a particular object was detected.\r\n self.detection_boxes = self.detection_graph.get_tensor_by_name('detection_boxes:0')\r\n # Each score represent how level of confidence for each of the objects.\r\n # Score is shown on the result image, together with the class label.\r\n self.detection_scores = self.detection_graph.get_tensor_by_name('detection_scores:0')\r\n self.detection_classes = self.detection_graph.get_tensor_by_name('detection_classes:0')\r\n self.num_detections = self.detection_graph.get_tensor_by_name('num_detections:0')\r\n\r\n def processFrame(self, image):\r\n # Expand dimensions since the trained_model expects images to have shape: [1, None, None, 3]\r\n image_np_expanded = np.expand_dims(image, axis=0)\r\n # Actual detection.\r\n start_time = time.time()\r\n (boxes, scores, classes, num) = self.sess.run(\r\n [self.detection_boxes, self.detection_scores, self.detection_classes, self.num_detections],\r\n feed_dict={self.image_tensor: image_np_expanded})\r\n end_time = time.time()\r\n\r\n print(\"Elapsed Time:\", end_time-start_time)\r\n\r\n im_height, im_width,_ = image.shape\r\n boxes_list = [None for i in range(boxes.shape[1])]\r\n for i in range(boxes.shape[1]):\r\n boxes_list[i] = (int(boxes[0,i,0] * im_height),\r\n int(boxes[0,i,1]*im_width),\r\n int(boxes[0,i,2] * im_height),\r\n int(boxes[0,i,3]*im_width))\r\n\r\n return boxes_list, scores[0].tolist(), [int(x) for x in classes[0].tolist()], int(num[0])\r\n\r\n def close(self):\r\n self.sess.close()\r\n self.default_graph.close()\r\n\r\nif __name__ == \"__main__\":\r\n model_path = 'frozen_inference_graph_face.pb'\r\n odapi = DetectorAPI(path_to_ckpt=model_path)\r\n threshold = 0.2\r\n print(\"[INFO] starting video stream...\")\r\n vs = VideoStream(src=0).start()\r\n #vs = VideoStream(usePiCamera=True).start()\r\n time.sleep(2.0)\r\n\r\n\r\n while True:\r\n count = 0\r\n while(True):\r\n \r\n img = vs.read() # 
for skipping frames\r\n count = count + 1\r\n \r\n if(count == 5):\r\n break\r\n img = vs.read()\r\n img = imutils.resize(img, width=600)\r\n\r\n boxes, scores, classes, num = odapi.processFrame(img)\r\n\r\n # Visualization of the results of a detection.\r\n if(len(boxes)>0):\r\n background = Image.fromarray(img)\r\n for i in range(len(boxes)):\r\n \r\n if scores[i] > threshold:\r\n box = boxes[i]\r\n # cv2.rectangle(img,(box[1],box[0]),(box[3],box[2]),(255,0,0),2)\r\n # cv2.putText(img, str(\"Face\"), (box[1]-10,box[0]-10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255,0,0), 2)\r\n s_img = cv2.imread(\"iron.jpg\")\r\n # frame[y:y+278, x:x+172] = s_img\r\n # cv2.imshow(\"Frame1\", s_img)\r\n \r\n foreground = Image.open(\"iron.jpg\")\r\n x = box[1]\r\n y = box[0]\r\n w = box[3]-box[1]\r\n h = box[2]-box[0]\r\n size = w+int(w/1.3),h+int(h/1.3)\r\n foreground.thumbnail(size, Image.ANTIALIAS)\r\n x1 = (2*x+w)/2\r\n y1 = (2*y+h)/2\r\n background.paste(foreground, (int(x), int(y-y*0.2)), foreground)\r\n try:\r\n background.save(\"ironman.jpg\")\r\n except:\r\n continue\r\n img = cv2.imread(\"ironman.jpg\")\r\n \r\n cv2.imshow(\"Frame1\", img)\r\n if(len(boxes) == 0):\r\n cv2.imshow(\"Frame1\", frame)\r\n\r\n\r\n # cv2.imshow(\"preview\", img)\r\n key = cv2.waitKey(5)\r\n if key & 0xFF == ord('q'):\r\n break" }, { "alpha_fraction": 0.7010309100151062, "alphanum_fraction": 0.7113401889801025, "avg_line_length": 16.636363983154297, "blob_id": "c6f37ca0375535d2dbc89e318beebe9b800d271e", "content_id": "aae971cb6c674a141cca510c7c048c22f71d68e8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 194, "license_type": "no_license", "max_line_length": 60, "num_lines": 11, "path": "/README.md", "repo_name": "VishalKanakamamidi/Ironman-Face-filter", "src_encoding": "UTF-8", "text": "# Ironman-Face-filter\nIronman Face Filter by Tensorflow object detection API <br/>\nDependecies: <br/>\nPython 3.6 <br/>\nTensorflow<br/>\nNumpy<br/>\nOpencv<br/>\nImutil<br/>\n\n\n![alt text](Show.png)\n" } ]
2
crubier/Fiabilipy
https://github.com/crubier/Fiabilipy
91229248bd504de02fa94a8da4f388b3ca0b949c
7959139021acd176ef615ee98edfdd241a330d78
f6534032fb15921786c12f3d4e76740ba0bd3e2e
refs/heads/master
2023-07-20T14:29:06.399975
2016-08-27T16:29:52
2016-08-27T16:29:52
66,722,602
1
1
null
null
null
null
null
[ { "alpha_fraction": 0.46979865431785583, "alphanum_fraction": 0.46979865431785583, "avg_line_length": 28.799999237060547, "blob_id": "2de279e55303a03a236548c0ef27771261cada5c", "content_id": "6098d96a46d1d10024d1755ed2b752b2f0ca3dce", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 149, "license_type": "no_license", "max_line_length": 50, "num_lines": 5, "path": "/documentation/source/api/system/system.rst", "repo_name": "crubier/Fiabilipy", "src_encoding": "UTF-8", "text": ":class:`System` -- System building and description\n==================================================\n\n.. autoclass:: fiabilipy.System\n :members:\n" }, { "alpha_fraction": 0.7100371718406677, "alphanum_fraction": 0.7100371718406677, "avg_line_length": 15.8125, "blob_id": "e23fdaf7a58e6870c4577a57e601728dd9923674", "content_id": "543341edb066a53971ee3ddb2b48525853c046f6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 270, "license_type": "no_license", "max_line_length": 80, "num_lines": 16, "path": "/documentation/source/examples/index.rst", "repo_name": "crubier/Fiabilipy", "src_encoding": "UTF-8", "text": "Examples\n========\n\nThis page gathers different examples of python scripts using fiabilipy. If you\nare using fiabilipy, feel free to send us your scripts, we will add them to this\npage !\n\n\n- :doc:`voter_intersection`\n\n\n\n.. toctree::\n :hidden:\n\n voter_intersection\n" }, { "alpha_fraction": 0.5476753115653992, "alphanum_fraction": 0.5547675490379333, "avg_line_length": 29.214284896850586, "blob_id": "6324ffea7e9a5a9d40c987ae79aded6285decc79", "content_id": "dfaf9e045753097c216309090c56f551db19e788", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1274, "license_type": "no_license", "max_line_length": 79, "num_lines": 42, "path": "/examples/voters.py", "repo_name": "crubier/Fiabilipy", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n\nfrom __future__ import (unicode_literals, absolute_import, division,\n print_function)\n\nfrom fiabilipy import Voter, Component\nfrom sympy import Symbol, solve, exp\n\ndef voter_example(nmax=5, nmin=3):\n \"\"\"\n find when a real voter M/N is equivalent to a single component.\n all real voters (i.e. 
1 < M < N) having `nmin <= N < nmax` are studied.\n\n Parameters\n ----------\n nmax: int, optional\n the maximum value of N (excluded)\n\n nmin: int, optional\n the minimum value of N (included)\n\n \"\"\"\n orders = ((M, N) for N in xrange(nmin, nmax) for M in xrange(2, N))\n l = Symbol('l', positive=True, null=False)\n t = Symbol('t', positive=True)\n x = Symbol('x')\n comp = Component('C', l)\n\n for order in orders:\n voter = Voter(comp, order[0], order[1])\n crossing = (voter.reliability(t) - comp.reliability(t)).nsimplify()\n roots = solve(crossing.subs(exp(-l*t), x))\n\n print('For M = {}, N = {}'.format(*order))\n print('− {} roots: '.format(len(roots)))\n for root in roots:\n print(' − {}'.format(root))\n print()\n\nif __name__ == '__main__':\n voter_example(nmax=6)\n" }, { "alpha_fraction": 0.48958688974380493, "alphanum_fraction": 0.5097302794456482, "avg_line_length": 39.96503448486328, "blob_id": "c6dccf2af090b9fafaada6723f8e0d5f9fd01908", "content_id": "0d3ccbcfa9a5206f9dfdc45196a427e6fe628f2f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 11725, "license_type": "no_license", "max_line_length": 110, "num_lines": 286, "path": "/fiabilipy/test_system.py", "repo_name": "crubier/Fiabilipy", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n#Copyright (C) 2013 Chabot Simon, Sadaoui Akim\n\n#This program is free software; you can redistribute it and/or modify\n#it under the terms of the GNU General Public License as published by\n#the Free Software Foundation; either version 2 of the License, or\n#(at your option) any later version.\n\n#This program is distributed in the hope that it will be useful,\n#but WITHOUT ANY WARRANTY; without even the implied warranty of\n#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\n#GNU General Public License for more details.\n\n#You should have received a copy of the GNU General Public License along\n#with this program; if not, write to the Free Software Foundation, Inc.,\n#51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.\n\nfrom __future__ import print_function, absolute_import\nimport unittest2\n\nfrom sympy import symbols, exp\nfrom networkx import DiGraph, is_isomorphic\n\nfrom fiabilipy import Component, Voter, System\n\nclass TestComponent(unittest2.TestCase):\n \"\"\" Test the Component class.\n \"\"\"\n def setUp(self):\n \"\"\" Here we build the component we will use as test subjects\n We assume components have constant failure and maintainability rates\n \"\"\"\n\n self.lambda_, self.mu = symbols('l, m',\n constant=True,\n positive=True,\n null=False\n )\n self.t = symbols('t', positive=True)\n self.component = Component('C', self.lambda_, self.mu)\n\n def test_reliability(self):\n \"\"\" Check the reliability is equals to the theorical one \"\"\"\n self.assertEqual(exp(-self.lambda_ * self.t),\n self.component.reliability(self.t))\n\n def test_maintainability(self):\n \"\"\" Check the maintainability is equals to the theorical one \"\"\"\n self.assertEqual(1 - exp(-self.mu * self.t),\n self.component.maintainability(self.t))\n\n def test_availability(self):\n \"\"\" Check the availability is equals to the theorical one \"\"\"\n availability = self.lambda_ * \\\n exp(self.t*(-self.lambda_ - self.mu)) / \\\n (self.lambda_ + self.mu) + \\\n self.mu/(self.lambda_ + self.mu)\n\n self.assertEqual(availability,\n self.component.availability(self.t))\n\nclass TestVoter(unittest2.TestCase):\n \"\"\" Test the Voter class.\n \"\"\"\n pass\n\nclass TestSystem(unittest2.TestCase):\n \"\"\" Test the System class.\n \"\"\"\n\n def setUp(self):\n \"\"\" Here we build some standard systems we will use as test subjects\n \"\"\"\n systems = {'simple':System(),\n 'series-parallel':System(),\n 'parallel-series':System(),\n 'complex':System(),\n 'voter':System(),\n }\n\n lambdas = {'alim': symbols('l_alim', positive=True, null=False),\n 'motor': symbols('l_motor', positive=True, null=False),\n }\n mus = {'alim': symbols('m_alim', positive=True, null=False),\n 'motor': symbols('m_motor', positive=True, null=False),\n }\n\n alim = [Component('Alim_A', lambda_=lambdas['alim'], mu=mus['alim']),\n Component('Alim_B', lambda_=lambdas['alim'], mu=mus['alim']),\n Component('Alim_C', lambda_=lambdas['alim'], mu=mus['alim']),\n ]\n motors = [\n Component('Motor_A', lambda_=lambdas['motor'], mu=mus['motor']),\n Component('Motor_B', lambda_=lambdas['motor'], mu=mus['motor']),\n ]\n\n voter = Voter(alim[0], M=2, N=3)\n\n systems['simple']['E'] = alim[0]\n systems['simple'][alim[0]] = motors[0]\n systems['simple'][motors[0]] = 'S'\n\n systems['series-parallel']['E'] = [alim[0], alim[1]]\n systems['series-parallel'][alim[0]] = motors[0]\n systems['series-parallel'][alim[1]] = motors[1]\n systems['series-parallel'][motors[0]] = 'S'\n systems['series-parallel'][motors[1]] = 'S'\n\n systems['parallel-series']['E'] = [alim[0], alim[1]]\n systems['parallel-series'][alim[0]] = [motors[0], motors[1]]\n systems['parallel-series'][alim[1]] = [motors[0], motors[1]]\n systems['parallel-series'][motors[0]] = 'S'\n systems['parallel-series'][motors[1]] = 'S'\n\n systems['complex']['E'] = [alim[0], alim[1], alim[2]]\n systems['complex'][alim[0]] = motors[0]\n systems['complex'][alim[1]] = [motors[0], motors[1]]\n systems['complex'][alim[2]] = motors[1]\n systems['complex'][motors[0]] 
= 'S'\n systems['complex'][motors[1]] = 'S'\n\n systems['voter']['E'] = voter\n systems['voter'][voter] = [motors[0], motors[1]]\n systems['voter'][motors[0]] = 'S'\n systems['voter'][motors[1]] = 'S'\n\n self.systems = systems\n self.alim = alim\n self.motors = motors\n self.voter = voter\n self.lambdas = lambdas\n self.mus = mus\n\n def test_successpaths(self):\n \"\"\" Check that all success paths are correctly found.\n \"\"\"\n paths = {\n 'simple' : set([('E', self.alim[0], self.motors[0], 'S')]),\n 'series-parallel': set([('E', self.alim[0], self.motors[0], 'S'),\n ('E', self.alim[1], self.motors[1], 'S')]),\n 'parallel-series': set([('E', self.alim[0], self.motors[0], 'S'),\n ('E', self.alim[0], self.motors[1], 'S'),\n ('E', self.alim[1], self.motors[1], 'S'),\n ('E', self.alim[1], self.motors[0], 'S')]),\n 'complex': set([('E', self.alim[0], self.motors[0], 'S'),\n ('E', self.alim[1], self.motors[0], 'S'),\n ('E', self.alim[1], self.motors[1], 'S'),\n ('E', self.alim[2], self.motors[1], 'S')]),\n 'voter': set([('E', self.voter, self.motors[0], 'S'),\n ('E', self.voter, self.motors[1], 'S')]),\n }\n\n for (name, S) in self.systems.items():\n for path in S.successpaths:\n paths[name].remove(tuple(path))\n self.assertEqual(paths[name], set())\n\n def test_minimalcuts(self):\n \"\"\" Check for minimal cut of orders 1 and 2.\n \"\"\"\n cuts = {\n 1: {\n 'simple': set([frozenset([self.alim[0]]),\n frozenset([self.motors[0]])]),\n 'series-parallel': set(),\n 'parallel-series': set(),\n 'complex': set(),\n 'voter': set([frozenset([self.voter])]),\n },\n 2: {\n 'simple': set([frozenset([self.alim[0]]),\n frozenset([self.motors[0]])]),\n 'series-parallel': set([\n frozenset([self.alim[1], self.alim[0]]),\n frozenset([self.alim[1], self.motors[0]]),\n frozenset([self.motors[1], self.alim[0]]),\n frozenset([self.motors[1], self.motors[0]]),\n ]),\n 'parallel-series': set([\n frozenset([self.alim[1], self.alim[0]]),\n frozenset([self.motors[1], self.motors[0]])]),\n 'complex': set([frozenset([self.motors[0], self.motors[1]])]),\n 'voter': set([frozenset([self.voter]),\n frozenset([self.motors[0], self.motors[1]])]),\n },\n }\n\n for order in [1, 2]:\n for (name, S) in self.systems.items():\n for cut in S.minimalcuts(order):\n cuts[order][name].remove(cut)\n self.assertEqual(cuts[order][name], set([]))\n\n def test_mttfvalues(self):\n r\"\"\" Check if the calculated MTTF values are correct.\n Testing MTTF values is interesting because there are computed by\n integration of reliability from 0 to \\inf.\n So if the values of MTTF are correct, it means :\n - MTTF values are correct\n - Reliabitily value for any t are correct too.\n The drawback is that if this test fails, we don’t known which of\n MTTF property or reliability method is failing.\n \"\"\"\n\n la = self.lambdas['alim']\n lm = self.lambdas['motor']\n mttf = {'simple': 1.0/(la + lm),\n 'series-parallel': 3.0/(2*(la + lm)),\n 'parallel-series':\n 1.0/(2*la + 2*lm) - 2.0/(2*la + lm) \\\n - 2.0/(la + 2*lm) + 4.0/(la + lm),\n 'complex':\n 4.0/(la + lm) - 1.0/(2*lm + la) + 1.0/(2*lm + 3*la) \\\n - 1.0/(2*lm + 2*la) - 2.0/(2*la + lm),\n 'voter':\n 6.0/(2*la + lm) - 3.0/(2*la + 2*lm) - 6.0/(3*la + lm) \\\n + 3.0/(3*la + 2*lm) + 2.0/(3*la + lm) - 1.0/(3*la + 2*lm)\n }\n\n for (name, values) in mttf.items():\n diff = values - self.systems[name].mttf\n self.assertEqual(diff.simplify(), 0)\n\n def test_graphmanagement(self):\n \"\"\" Check if the constructing a system by its graph works as intended.\n \"\"\"\n component = [Component('C%s' % i, 
1e-3) for i in range(4)]\n system = System()\n\n #because 'E' must be the first inserted element\n with self.assertRaises(ValueError):\n system[component[0]] = 'S'\n\n #Assert the following constructions don’t fail.\n #from a list\n system['E'] = [component[0], component[1]]\n system[component[0]] = 'S'\n wanted = DiGraph({'E':[component[0].__str__(), component[1].__str__()], component[0].__str__():['S']})\n self.assertTrue(is_isomorphic(system._graph, wanted))\n\n del system[component[0]] #This component isn’t used anymore\n #from a single element\n system['E'] = component[1]\n system[component[1]] = 'S'\n wanted = DiGraph({'E':[component[1].__str__()], component[1].__str__():'S'})\n self.assertTrue(is_isomorphic(system._graph, wanted))\n\n def test_cache(self):\n \"\"\" Perfom some tests on the cache\n \"\"\"\n components = [Component('C{}'.format(i), 1e-3) for i in (0, 1, 2)]\n system = System()\n\n # +-- C0 --+\n # | |\n # E ---| +-- C2 -- S\n # | |\n # +-- C1 --+\n\n system['E'] = [components[0], components[1]]\n system[components[0]] = components[2]\n system[components[1]] = components[2]\n system[components[2]] = 'S'\n\n self.assertAlmostEqual(system.mttf, 2000/3.)\n self.assertIn('mttf', system._cache) #The mttf is cached\n\n components[0].lambda_ = 0.05 #Let’s change the failure rate\n self.assertEqual(system._cache, dict()) #The cache is now empty\n self.assertAlmostEqual(system.mttf, 331750/663.)\n self.assertIn('mttf', system._cache) #The mttf is cached\n\n #now, check if it works with a shared component\n othersystem = System()\n othersystem['E'] = components[0]\n othersystem[components[0]] = 'S'\n\n self.assertAlmostEqual(othersystem.mttf, 20)\n components[0].lambda_ = 2e-4\n self.assertAlmostEqual(othersystem.mttf, 5000)\n self.assertAlmostEqual(system.mttf, 29000/33.)\n\nif __name__ == '__main__':\n unittest2.main()\n" }, { "alpha_fraction": 0.5023474097251892, "alphanum_fraction": 0.5335923433303833, "avg_line_length": 28.840579986572266, "blob_id": "1da11aeb4b70dba71f3b97993c80d0b1580245ae", "content_id": "00f4923a95dd1ea855f432a3c899f44cc3affd3a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6179, "license_type": "no_license", "max_line_length": 80, "num_lines": 207, "path": "/fiabilipy/component.py", "repo_name": "crubier/Fiabilipy", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n#Copyright (C) 2013 Chabot Simon, Sadaoui Akim\n\n#This program is free software; you can redistribute it and/or modify\n#it under the terms of the GNU General Public License as published by\n#the Free Software Foundation; either version 2 of the License, or\n#(at your option) any later version.\n\n#This program is distributed in the hope that it will be useful,\n#but WITHOUT ANY WARRANTY; without even the implied warranty of\n#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\n#GNU General Public License for more details.\n\n#You should have received a copy of the GNU General Public License along\n#with this program; if not, write to the Free Software Foundation, Inc.,\n#51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.\n\nr\"\"\" Component design module\n\nThis modude gives tools to design basic components and compute some metrics,\nsuch as the reliability, the availability, the Mean-Time-To-Failure, and so on.\n\n\"\"\"\nfrom __future__ import division\nfrom builtins import object\nfrom past.utils import old_div\n\nfrom sympy import exp, Symbol, oo\n\n__all__ = ['Component']\n\nclass Component(object):\n r\"\"\" Describe a component with a constant failure rate.\n\n This class is used to create all the components of a system.\n\n Attributes\n ----------\n name : str\n the name of the component. (It has to be a unique name for the whole\n system)\n lambda_ : float\n the constant failure rate of the component\n mu : float, optional\n the constant maintainability rate of the component\n initialy_avaible : boolean, optional\n whether the component is avaible at t=0 or not\n\n Examples\n --------\n >>> motor = Component('M', 1e-4, 3e-2)\n >>> motor.lambda_\n 0.0001\n \"\"\"\n\n def __init__(self, name, lambda_, mu=0, initialy_avaible=True):\n self.__dict__[\"_systems\"] = set()\n self.lambda_ = lambda_\n self.mu = mu\n self.name = name\n self.initialy_avaible = initialy_avaible\n\n def __lt__(self,other):\n if(isinstance(other, str)):\n return self.name < other\n else:\n return self.name < other.name\n\n def __setattr__(self, name, value):\n for system in self._systems:\n system._cache = {}\n self.__dict__[name] = value\n\n def __repr__(self):\n return u'Component(%s)' % self.name\n\n def __str__(self):\n return self.name\n\n def reliability(self, t):\n r\"\"\" Compute the reliability of the component at `t`\n\n This method compute the reliability of the component at `t`.\n\n Parameters\n ----------\n t : float or Symbol\n\n Returns\n -------\n out : float or symbolic expression\n The reliability calculated for the given `t`\n\n Examples\n --------\n >>> motor = Component('M', 1e-4, 3e-2)\n >>> t = Symbol('t', positive=True)\n >>> motor.reliability(t)\n exp(-0.0001*t)\n >>> motor.reliability(1000)\n 0.904837418035960\n \"\"\"\n return exp(-self.lambda_ * t)\n\n def maintainability(self, t):\n r\"\"\" Compute the maintainability of the component at `t`\n\n This method compute the maintainability of the component at `t`.\n\n Parameters\n ----------\n t : int or Symbol\n\n Returns\n -------\n out : float or symbolic expression\n The maintainability calculated for the given `t`\n\n Examples\n --------\n >>> motor = Component('M', 1e-4, 3e-2)\n >>> t = Symbol('t', positive=True)\n >>> motor.maintainability(t)\n -exp(-0.03*t) + 1.0\n >>> motor.maintainability(1000)\n 0.999999999999906\n \"\"\"\n return 1.0 - exp(-self.mu * t)\n\n def availability(self, t):\n r\"\"\" Compute the availability of the component at `t`\n\n This method compute the availability of the component at `t`.\n\n Parameters\n ----------\n t : int or Symbol\n\n Returns\n -------\n out : float or symbolic expression\n The availability calculated for the given `t`\n\n Examples\n --------\n >>> motor = Component('M', 1e-4, 3e-2)\n >>> t = Symbol('t', positive=True)\n >>> motor.availability(t)\n 0.00332225913621263*exp(-0.0301*t) + 0.996677740863787\n >>> motor.availability(1000)\n 0.996677740863788\n \"\"\"\n if self.mu == self.lambda_ == 0:\n return 1\n a = old_div(self.mu, (self.mu + 
self.lambda_))\n        if self.initialy_avaible:\n            b = old_div(self.lambda_, (self.mu + self.lambda_))\n        else:\n            b = old_div(- self.mu, (self.mu + self.lambda_))\n\n        return a + b*exp(-(self.lambda_ + self.mu) * t)\n\n    @property\n    def mttf(self):\n        r\"\"\" Compute the Mean-Time-To-Failure of the component\n\n        The MTTF is defined as:\n        :math:`MTTF = \\int_{0}^{\\infty} R(t)dt = \\frac{1}{\\lambda}`\n\n        when the failure rate (:math:`\\lambda`) is constant\n\n        Returns\n        -------\n        out : float\n            The component MTTF\n\n        Examples\n        --------\n        >>> motor = Component('M', 1e-4, 3e-2)\n        >>> motor.mttf\n        10000.0\n        \"\"\"\n        return old_div(1.0,self.lambda_)\n\n    @property\n    def mttr(self):\n        r\"\"\" Compute the Mean-Time-To-Repair of the component\n\n        The MTTR is defined as:\n        :math:`MTTR = \\int_{0}^{\\infty} 1 - M(t)dt = \\frac{1}{\\mu}`\n\n        when the repair rate (:math:`\\mu`) is constant\n\n        Returns\n        -------\n        out : float\n            The component MTTR\n\n        Examples\n        --------\n        >>> motor = Component('M', 1e-4, 3e-2)\n        >>> motor.mttr\n        33.333333333333336\n        \"\"\"\n        return old_div(1.0,self.mu)\n" }, { "alpha_fraction": 0.5974465608596802, "alphanum_fraction": 0.617509126663208, "avg_line_length": 39.400001525878906, "blob_id": "4d408f516da1487aa9724f7d8cecb41fcdedfd02", "content_id": "2eb18335271166c025dc2123fc381a3b6580c03e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3853, "license_type": "no_license", "max_line_length": 85, "num_lines": 95, "path": "/fiabilipy/test_markov.py", "repo_name": "crubier/Fiabilipy", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n#Copyright (C) 2013 Chabot Simon, Sadaoui Akim\n\n#This program is free software; you can redistribute it and/or modify\n#it under the terms of the GNU General Public License as published by\n#the Free Software Foundation; either version 2 of the License, or\n#(at your option) any later version.\n\n#This program is distributed in the hope that it will be useful,\n#but WITHOUT ANY WARRANTY; without even the implied warranty of\n#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n#GNU General Public License for more details.\n\n#You should have received a copy of the GNU General Public License along\n#with this program; if not, write to the Free Software Foundation, Inc.,\n#51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.\n\nfrom __future__ import print_function, absolute_import\nfrom builtins import range\nimport unittest2\n\nfrom random import random\n\nfrom fiabilipy import System, Component, Markovprocess\n\nclass TestMarkov(unittest2.TestCase):\n#The reliability and the availability of systems are already tested. Let’s\n#assume they are correct for any systems.\n#The idea behind those tests is very simple. We build different systems twice:\n# - one with the “standard” way, by describing a system by its reliability block\n#   diagram.\n# - one with a Markov process, which is going to be tested\n#once that is done, the availabilities computed just have to be compared with each\n#other. If a different result is found, then there is a bug (or more…)\n\n    def setUp(self):\n        #Let’s build some standard systems\n        systems = {'series-parallel': System(),\n                   'parallel-series': System(),\n                   'simple': System()\n                  }\n\n        lambdas = {'alim':1e-4, 'motor':2e-5}\n        mus = {'alim':5e-4, 'motor':2e-3}\n\n        alim = [Component('Alim_A', lambda_=lambdas['alim'], mu=mus['alim']),\n                Component('Alim_B', lambda_=lambdas['alim'], mu=mus['alim']),\n               ]\n        motors = [Component('Motor_A', lambda_=lambdas['motor'], mu=mus['motor']),\n                  Component('Motor_B', lambda_=lambdas['motor'], mu=mus['motor']),\n                 ]\n\n        systems['simple']['E'] = alim[0]\n        systems['simple'][alim[0]] = motors[0]\n        systems['simple'][motors[0]] = 'S'\n\n        systems['series-parallel']['E'] = [alim[0], alim[1]]\n        systems['series-parallel'][alim[0]] = motors[0]\n        systems['series-parallel'][alim[1]] = motors[1]\n        systems['series-parallel'][motors[0]] = 'S'\n        systems['series-parallel'][motors[1]] = 'S'\n\n        systems['parallel-series']['E'] = [alim[0], alim[1]]\n        systems['parallel-series'][alim[0]] = [motors[0], motors[1]]\n        systems['parallel-series'][alim[1]] = [motors[0], motors[1]]\n        systems['parallel-series'][motors[0]] = 'S'\n        systems['parallel-series'][motors[1]] = 'S'\n\n        #Let’s build the Markov equivalent system\n        self.components = (alim[0], alim[1], motors[0], motors[1])\n        self.process = Markovprocess(self.components, {0:1}) #All the components work\n\n        self.states = {\n            'series-parallel': lambda x : (x[0]*x[2]) + (x[1]*x[3]),\n            'parallel-series': lambda x : (x[0]+x[1]) * (x[2]+x[3]),\n            'simple': lambda x : x[0]*x[2],\n        }\n\n        self.systems = systems\n\n    def test_availability(self):\n        #Let’s do `maxiter` checks of availability values, for times randomly\n        #picked between [0, `maxtime`)\n        maxiter = 1000\n        maxtime = 420000\n        for _ in range(maxiter):\n            t = random() * maxtime\n            for name, states in self.states.items():\n                self.assertAlmostEqual(self.process.value(t, states),\n                                       self.systems[name].availability(t))\n\nif __name__ == '__main__':\n    unittest2.main()\n" }, { "alpha_fraction": 0.48086148500442505, "alphanum_fraction": 0.4966624975204468, "avg_line_length": 32.879859924316406, "blob_id": "5c1fe0b73604a419baeeff7157b05b82cd024fee", "content_id": "593c4475b0e0f40e7e2c44081768ddf7f577379c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 19201, "license_type": "no_license", "max_line_length": 116, "num_lines": 566, "path": "/fiabilipy/system.py", "repo_name": "crubier/Fiabilipy", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n#Copyright (C) 2013 Chabot Simon, Sadaoui Akim\n\n#This program is free software; you can redistribute it and/or modify\n#it under the terms of the GNU General Public License as published by\n#the Free Software Foundation; either version 2 of the License, or\n#(at your option) any later version.\n\n#This program is distributed in the hope that it will be useful,\n#but WITHOUT ANY WARRANTY; without even the implied warranty of\n#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\n#GNU General Public License for more details.\n\n#You should have received a copy of the GNU General Public License along\n#with this program; if not, write to the Free Software Foundation, Inc.,\n#51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.\n\nr\"\"\" Reliability system design and computation\n\nThis module gives classes and functions to design complex systems and\ncompute some metrics, such as the reliability, the availability, the\nMean-Time-To-Failure, and so on.\n\n\"\"\"\nfrom __future__ import print_function\nfrom builtins import range\nfrom builtins import object\n\nfrom numpy import empty, ones, delete\nfrom sympy import exp, Symbol, oo\nfrom scipy.special import binom\nfrom itertools import combinations, chain\nfrom collections import Iterable\nimport networkx as nx\n\nfrom fiabilipy import Component\nfrom functools import reduce\n\n__all__ = ['System']\n\nALLSUBSETS = lambda n: (chain(*[combinations(list(range(n)), ni)\n for ni in range(n+1)]))\n\n\nclass System(object):\n r\"\"\" Describe a system with different components.\n\n The components are linked together thanks to a reliability diagram.\n This reliability diagram is represented by a graph. This graph\n *must* have two special nodes called `E` and `S`. `E` represents the\n start of the system and `S` its end (names stand for “Entrée”\n (start) and “Sortie” (end) in French).\n\n Examples\n --------\n\n Let’s have a look to the following system::\n\n | -- C0 -- |\n E -- | | -- C2 -- S\n | -- C1 -- |\n\n Thus, to represent such a system, you must create the three\n components C0, C1 and C2 and link them.\n\n >>> C = [Component(i, 1e-4) for i in xrange(3)]\n >>> S = System()\n >>> S['E'] = [C[0], C[1]]\n >>> S[C[0]] = [C[2]]\n >>> S[C[1]] = [C[2]]\n >>> S[C[2]] = ['S']\n\n So, you can use the `System` object as a simple python dictionnary\n where each key is a component and the value associated it the list\n of the component’s successors.\n \"\"\"\n\n def __init__(self, graph=None):\n self._graph = nx.DiGraph(graph)\n self._map = {'E':'E','S':'S'} #FIXME create map str -> component in case graph is non empty\n self._cache = {}\n self._t = Symbol('t', positive=True)\n\n def __getitem__(self, component):\n return self._map[self._graph[component.__str__()]]\n\n def __setitem__(self, component, successors):\n #Let’s do different checks before inserting the element\n if not isinstance(successors, Iterable):\n if not isinstance(successors, Component):\n msg = u'successors must be a list of components, a component '\n raise ValueError(msg)\n successors = [successors]\n if component != 'E' and 'E' not in self._graph:\n msg = u\"'E' must be the first inserted component\"\n raise ValueError(msg)\n for successor in successors:\n if successor != 'S':\n successor._systems.add(self)\n self._graph.add_edge(component.__str__(), successor.__str__())\n self._map[component.__str__()]=component\n self._map[successor.__str__()]=successor #FIXME this may be optional\n\n #reset the cache\n self._cache = {}\n\n def __delitem__(self, component):\n for c in self._graph:\n try:\n self._graph.remove_edge(c, component.__str__())\n except nx.NetworkXError: #i.e. 
edge(c, component) does not exist\n pass\n except AttributeError:\n assert self._graph[c] == 'S'\n self._graph.remove_node(component.__str__())\n if component not in self.components:\n component._systems.remove(self)\n del self._map[component.__str__()]\n #reset the cache\n self._cache = {}\n\n def __len__(self):\n return len(self._graph)\n\n def __repr__(self):\n return u'I\\'m a system'\n\n def copy(self):\n r\"\"\" Return a copy of the system.\n\n Returns\n -------\n out: System\n A copy of the current system\n\n Notes\n -----\n The components are the same (same reference).\n Only the internal graph is new\n \"\"\"\n _copy = System()\n _copy['E'] = [] #'E' must be the first inserted component\n for c in self._graph:\n _copy[c] = self[c][:]\n _copy._map = self._map.copy()\n return _copy\n\n @property\n def components(self):\n r\"\"\" The list of the components used by the system\n\n Returns\n -------\n out: list\n the list of the components used by the system, except `E` and\n `S`\n \"\"\"\n #FIXME Vincent it should be the component not its str\n return [self._map[comp] for comp in self._graph if comp not in ('E', 'S')]\n\n def _probabilitiescomputation(self, t, method):\n \"\"\" Given a system and a `method` (either availability or\n maintainability or reliability), this method evaluates the asking\n value by exploring the graph at time `t`.\n \"\"\"\n #TODO : improve complexity ?\n # n\n # P(U a_i) = sum (-1)^{-1+|s|} P(^a_i)\n # i=1 s\\in[1,n], i\\in s\n # s != {}\n #\n paths = self.successpaths\n R = 0.0\n for S in ALLSUBSETS(len(paths)):\n if not S:\n continue\n comps = set([c for i in S for c in paths[i][1:-1]])\n r = reduce(lambda x, y:x*getattr(y, method)(t), comps, 1)\n R += -r if len(S) % 2 == 0 else r\n return R\n\n def availability(self, t):\n r\"\"\" Compute the availability of the whole system\n\n This method compute the availability of the system at `t`.\n\n Parameters\n ----------\n t : float or Symbol\n\n Returns\n -------\n out : float or symbolic expression\n The availability calculated for the given `t`\n\n Examples\n --------\n >>> motor = Component('M', 1e-4, 3e-2)\n >>> power = Component('P', 1e-6, 2e-4)\n >>> t = Symbol('t', positive=True)\n >>> S = System()\n >>> S['E'] = [power]\n >>> S[power] = [motor]\n >>> S[motor] = 'S'\n >>> S.availability(t) #doctest: +NORMALIZE_WHITESPACE\n (200/201 + exp(-201*t/1000000)/201)*(300/301 +\n exp(-301*t/10000)/301)\n >>> S.availability(1000)\n 0.995774842225189\n \"\"\"\n try:\n formula = self._cache['availability']\n except KeyError:\n formula = self._probabilitiescomputation(self._t, 'availability')\n self._cache['availability'] = formula\n\n if isinstance(t, Symbol):\n return formula.nsimplify()\n else:\n return formula.subs(self._t, t).evalf()\n\n def reliability(self, t):\n r\"\"\" Compute the reliability of the whole system\n\n This method compute the reliability of the system at `t`.\n\n Parameters\n ----------\n t : float or Symbol\n\n Returns\n -------\n out : float or symbolic expression\n The reliability calculated for the given `t`\n\n Examples\n --------\n >>> motor = Component('M', 1e-4, 3e-2)\n >>> power = Component('P', 1e-6, 2e-4)\n >>> t = Symbol('t', positive=True)\n >>> S = System()\n >>> S['E'] = [power]\n >>> S[power] = [motor]\n >>> S[motor] = 'S'\n >>> S.reliability(t)\n exp(-101*t/1000000)\n >>> S.reliability(1000)\n 0.903933032885864\n \"\"\"\n try:\n formula = self._cache['reliability']\n except KeyError:\n formula = self._probabilitiescomputation(self._t, 'reliability')\n self._cache['reliability'] 
= formula\n\n if isinstance(t, Symbol):\n return formula.nsimplify()\n else:\n return formula.subs(self._t, t).evalf()\n\n def maintainability(self, t):\n r\"\"\" Compute the maintainability of the whole system\n\n This method compute the maintainability of the system at `t`.\n\n Parameters\n ----------\n t : float or Symbol\n\n Returns\n -------\n out : float or symbolic expression\n The maintainability calculated for the given `t`\n\n Examples\n --------\n >>> motor = Component('M', 1e-4, 3e-2)\n >>> power = Component('P', 1e-6, 2e-4)\n >>> t = Symbol('t', positive=True)\n >>> S = System()\n >>> S['E'] = [power]\n >>> S[power] = [motor]\n >>> S[motor] = 'S'\n >>> S.maintainability(t)\n (1 - exp(-3*t/100))*(1 - exp(-t/5000))\n >>> S.maintainability(1000)\n 0.181269246922001\n \"\"\"\n try:\n formula = self._cache['maintainability']\n except KeyError:\n formula = self._probabilitiescomputation(self._t, 'maintainability')\n self._cache['maintainability'] = formula\n\n if isinstance(t, Symbol):\n return formula.nsimplify()\n else:\n return formula.subs(self._t, t).evalf()\n\n @property\n def mttf(self):\n r\"\"\" Compute the Mean-Time-To-Failure of the system\n\n The MTTF is defined as :\n :math:`MTTF = \\int_{0}^{\\infty} R(t)dt`\n\n Returns\n -------\n out : float\n The system MTTF\n\n Examples\n --------\n >>> motor = Component('M', 1e-4, 3e-2)\n >>> power = Component('P', 1e-6, 2e-4)\n >>> S = System()\n >>> S['E'] = [power]\n >>> S[power] = [motor]\n >>> S[motor] = 'S'\n >>> S.mttf\n 1000000/101\n \"\"\"\n try:\n return self._cache['mttf']\n except KeyError:\n t = Symbol('t', positive=True)\n self._cache['mttf'] = self.reliability(t).integrate((t, 0, oo))\n return self._cache['mttf']\n\n @property\n def mttr(self):\n r\"\"\" Compute the Mean-Time-To-Repair of the system\n\n The MTTR is defined as :\n :math:`MTTF = \\int_{0}^{\\infty} 1 - M(t)dt`\n\n Returns\n -------\n out : float\n The system MTTR\n\n Examples\n --------\n >>> motor = Component('M', 1e-4, 3e-2)\n >>> power = Component('P', 1e-6, 2e-4)\n >>> S = System()\n >>> S['E'] = [power]\n >>> S[power] = [motor]\n >>> S[motor] = 'S'\n >>> S.mttr\n 2265100/453\n \"\"\"\n try:\n return self._cache['mttr']\n except KeyError:\n t = Symbol('t', positive=True)\n mttr = (1 - self.maintainability(t)).integrate((t, 0, oo))\n self._cache['mttr'] = mttr\n return self._cache['mttr']\n\n\n @property\n def successpaths(self):\n r\"\"\" Return all the success paths of the reliability diagram\n\n A success path is defined as a path from 'E' to 'S'.\n\n Returns\n -------\n out : list of paths\n the list of all the success paths. 
A path, is defined as a list\n of components\n\n Examples\n --------\n >>> motor = Component('M', 1e-4, 3e-2)\n >>> powers = [Component('P{}'.format(i), 1e-6, 2e-4) for i in (0,1)]\n >>> S = System()\n >>> S['E'] = [powers[0], powers[1]]\n >>> S[powers[0]] = S[powers[1]] = [motor]\n >>> S[motor] = 'S'\n >>> S.successpaths #doctest: +NORMALIZE_WHITESPACE\n [['E', Component(P0), Component(M), 'S'],\n ['E', Component(P1), Component(M), 'S']]\n \"\"\"\n try:\n return self._cache['successpaths']\n except KeyError:\n self._cache['successpaths'] = list(self.findallpaths('E', 'S'))\n return self._cache['successpaths']\n\n def findallpaths(self, start='E', end='S'):\n r\"\"\" Find all paths between two components in the reliability diagram\n\n Parameters\n ----------\n start : Component, optional\n find paths from this component\n end : Component, optional\n find paths to this component\n\n Returns\n -------\n out : iterator\n an iterator on the paths from `start` to `stop`\n\n Examples\n --------\n >>> motor = Component('M', 1e-4, 3e-2)\n >>> powers = [Component('P{}'.format(i), 1e-6, 2e-4) for i in (0,1)]\n >>> S = System()\n >>> S['E'] = [powers[0], powers[1]]\n >>> S[powers[0]] = S[powers[1]] = [motor]\n >>> S[motor] = 'S'\n >>> list(S.findallpaths(start=powers[0])) #doctest: +NORMALIZE_WHITESPACE\n [[Component(P0), Component(M), 'S']]\n \"\"\"\n return [[self._map[x] for x in l] for l in nx.all_simple_paths(self._graph, start.__str__(), end.__str__())]\n\n def minimalcuts(self, order=1):\n r\"\"\" List the minimal cuts of the system of order <= `order`\n\n A minimal cut of order :math:`n`, is a set of :math:`n` components,\n such as if there all unavailable, the whole system is unavailable.\n\n This function aims to find out every minimal cuts of order inferior\n to `order`.\n\n Parameters\n ----------\n order : int, optional\n The maximal order to look for.\n\n Returns\n -------\n out : list of frozensets\n each frozenset contains the components that constitute a minimal\n cut\n\n Examples\n --------\n >>> motor = Component('M', 1e-4, 3e-2)\n >>> powers = [Component('P{}'.format(i), 1e-6, 2e-4) for i in (0,1)]\n >>> S = System()\n >>> S['E'] = [powers[0], powers[1]]\n >>> S[powers[0]] = S[powers[1]] = [motor]\n >>> S[motor] = 'S'\n >>> S.minimalcuts(order=1) #doctest: +ELLIPSIS\n [frozenset(...)]\n >>> S.minimalcuts(order=2) #doctest: +ELLIPSIS\n [frozenset(...), frozenset(...)]\n \"\"\"\n paths = self.successpaths\n incidence = empty((len(paths), len(self.components)))\n\n for path in range(len(paths)):\n for comp in range(len(self.components)):\n if self.components[comp] in paths[path]:\n incidence[path, comp] = 1\n else:\n incidence[path, comp] = 0\n\n pairs = list(self.components)\n minimal = []\n\n for k in range(1, order+1):\n if incidence.shape[1] == 0: #No more minimalcuts\n break\n #Let’s looking for column of ones\n vones = ones(len(paths))\n indicetodelete = []\n for comp in range(len(pairs)):\n if (incidence[:, comp] == vones).all():\n if isinstance(pairs[comp], frozenset):\n minimal.append(pairs[comp])\n else:\n minimal.append(frozenset([pairs[comp]]))\n indicetodelete.append(comp)\n\n if k >= order:\n #so it’s useless to compute newpairs and the new incidence\n #matrix because they won’t be used anymore.\n continue\n\n incidence = delete(incidence, indicetodelete, axis=1)\n pairs = [p for i, p in enumerate(pairs) if i not in indicetodelete]\n newpairs = list(combinations(list(range(len(pairs))), k+1))\n incidence_ = empty((len(paths), len(newpairs)))\n for x in 
range(incidence_.shape[0]):\n for y in range(incidence_.shape[1]):\n value = 0\n for comp in newpairs[y]:\n if incidence[x, comp]:\n value = 1\n break\n incidence_[x, y] = value\n\n incidence = incidence_\n pairs = [frozenset([pairs[x] for x in p]) for p in newpairs]\n\n return minimal\n\n def faulttreeanalysis(self, output=None, order=2):\n r\"\"\" Build the fault tree analysis of the system\n\n Print (or write) the content of the dot file needed to draw the\n fault tree of the system.\n\n Parameters\n ----------\n output : file-like object, optional\n If `output` is given, then the content is written into this\n file. `output` *must* have a :py:meth:`write` method.\n\n order : int, optional\n This is the maximum order of the minimal cuts the function looks\n for.\n\n Notes\n -----\n Please, see the `Graphviz <http://graphviz.org/>` website for more\n information about how to transform the ouput code into a nice\n picture.\n\n \"\"\"\n #TODO the tree needs to be simplified\n cuts = self.minimalcuts(order)\n data = ['digraph G {']\n data.append('\\t\"not_S\" -> \"or\"')\n for i, cut in enumerate(cuts):\n data.append('\\tor -> and_%s' % i)\n for comp in cut:\n data.append('\\tand_%s -> \"%s\"' % (i, comp.name))\n data.append('}')\n\n if not output:\n print('\\n'.join(data))\n else:\n try:\n output.write('\\n'.join(data) + '\\n')\n except AttributeError:\n with open(output, 'w') as fobj:\n fobj.write('\\n'.join(data) + '\\n')\n\n\n def draw(self):\n r\"\"\" Draw the system\n\n Draw the system with graphviz.\n\n Examples\n --------\n >>> import pylab as p\n >>> motor = Component('M', 1e-4, 3e-2)\n >>> powers = [Component('P{}'.format(i), 1e-6, 2e-4) for i in (0,1)]\n >>> S = System()\n >>> S['E'] = [powers[0], powers[1]]\n >>> S[powers[0]] = S[powers[1]] = [motor]\n >>> S[motor] = 'S'\n >>> S.draw()\n >>> p.show()\n\n \"\"\"\n nx.draw_graphviz(self._graph)\n" }, { "alpha_fraction": 0.5196078419685364, "alphanum_fraction": 0.529411792755127, "avg_line_length": 11.75, "blob_id": "5fb287c42be835426e829fa8868728465256345b", "content_id": "f7f6541a6841860adf8ada71be8e871250438640", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 102, "license_type": "no_license", "max_line_length": 17, "num_lines": 8, "path": "/documentation/source/api/index.rst", "repo_name": "crubier/Fiabilipy", "src_encoding": "UTF-8", "text": "API Documentation\n=================\n\n.. 
toctree::\n :maxdepth: 2\n\n system/index\n markov/index\n" }, { "alpha_fraction": 0.5037028789520264, "alphanum_fraction": 0.5203372240066528, "avg_line_length": 35.57083511352539, "blob_id": "f866c82b5d1925a65edacd6949e189255f2ce502", "content_id": "86d5792e0369529f6ed81e99a57779c0b6b6e380", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8783, "license_type": "no_license", "max_line_length": 80, "num_lines": 240, "path": "/fiabilipy/markov.py", "repo_name": "crubier/Fiabilipy", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n#Copyright (C) 2013 Chabot Simon, Sadaoui Akim\n\n#This program is free software; you can redistribute it and/or modify\n#it under the terms of the GNU General Public License as published by\n#the Free Software Foundation; either version 2 of the License, or\n#(at your option) any later version.\n\n#This program is distributed in the hope that it will be useful,\n#but WITHOUT ANY WARRANTY; without even the implied warranty of\n#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n#GNU General Public License for more details.\n\n#You should have received a copy of the GNU General Public License along\n#with this program; if not, write to the Free Software Foundation, Inc.,\n#51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.\n\nfrom __future__ import print_function\nfrom builtins import range\nfrom builtins import object\n\nfrom numpy import zeros, binary_repr, where, array\nfrom scipy.linalg import expm\n\n__all__ = ['Markovprocess']\n\nclass Markovprocess(object):\n \"\"\" Initialize the markov process management of the system.\n\n Parameters\n ----------\n components : list\n the is the list of the components to manage\n initstates : dict or list\n describes the initial state of the process, giving the initial\n probabilities of being in each situation\n\n Examples\n --------\n Let S be a system of two components A and B.\n\n >>> from fiabilipy import Component\n >>> A, B = Component('A', 1e-2), Component('B', 1e-6)\n >>> comp = (A, B)\n >>> init = [0.8, 0.1, 0.1, 0]\n >>> process = Markovprocess(comp, init)\n\n * `init[0]` is the probability of having A and B working\n * `init[1]` is the probability of having A working and not B\n * `init[2]` is the probability of having B working and not A\n * `init[3]` is the probability of having neither A nor B working\n\n In a general way `init[i]` is the probability of having::\n\n >>> for c, state in enumerate(binary_repr(i, len(components))):\n >>> if state:\n >>> print('%s working' % components[c])\n >>> else:\n >>> print('%s not working' % components[c])\n\n As `initstates` may be very sparse, it can be given through a\n dictionnary as follow::\n\n >>> init = {}\n >>> init[0] = 0.8\n >>> init[1] = init[2] = 0.1\n \"\"\"\n\n def __init__(self, components, initstates):\n self.components = tuple(components) #assert order won’t change\n self.matrix = None\n if isinstance(initstates, dict):\n N = len(self.components)\n self.initstates = array([initstates.get(x, 0)\n for x in range(2**N)])\n else:\n self.initstates = array(initstates)\n self._initmatrix()\n self._states = {}\n\n def _initmatrix(self):\n r\"\"\" Given a list of components, this function initialize the markov\n matrix.\n \"\"\"\n N = len(self.components)\n #2^N different states\n #Let’s build the 2^(2N) matrix…\n self.matrix = zeros((2**N, 2**N))\n\n for i in range(2**N):\n currentstate = array([int(x) for x in binary_repr(i, N)])\n for j in 
range(i+1, 2**N):\n newstate = array([int(x) for x in binary_repr(j, N)])\n tocheck = where(newstate != currentstate) #Components changed\n if len(tocheck[0]) > 1: #Impossible to reach\n continue\n\n component = self.components[tocheck[0][0]]#The changed component\n self.matrix[i, j] = component.lambda_\n self.matrix[j, i] = component.mu\n\n rowsum = self.matrix.sum(axis=1)\n self.matrix[range(2**N), range(2**N)] = -rowsum\n\n def _computestates(self, func):\n r\"\"\" Compute the states described by a function\n\n Parameters\n ----------\n func: function\n a function defining if a state is tracked or not\n\n Returns\n -------\n out: list\n the list of states actually tracked by `func`\n\n Examples\n --------\n >>> A, B, C, D = [Component(i, 1e-3) for i in xrange(4)]\n >>> comp = (A, B, C, D)\n >>> process = Markovprocess(comp, {0:1})\n >>> availablefunc = lambda x: (x[0] or x[1]) and (x[2] or x[3])\n >>> availablestates = process.computestates(states)\n\n This defines, for instance, the following parallel-series system::\n\n | -- A -- | | -- C -- |\n E -- | | -- | | -- S\n | -- B -- | | -- D -- |\n\n * `availablefunc` is the function describing when the system is\n available.\n * `availablestates` is the actual states when the system is\n available. The result is used by the :py:meth:`value` method.\n \"\"\"\n\n N = len(self.components)\n nsquared = 2**N\n states = []\n for x in range(nsquared):\n s = [int(i) for i in binary_repr(nsquared - 1 - x, N)]\n if func(s):\n states.append(x)\n return states\n\n def value(self, t, statefunc=None):\n r\"\"\" Compute the probability of being in some states.\n\n Parameters\n ----------\n t : float\n when the probability must be computed\n state : function\n a function defining the state you want to know the probability\n\n Examples\n --------\n >>> A, B, C, D = [Component(i, 1e-3) for i in xrange(4)]\n >>> comp = (A, B, C, D)\n >>> process = Markovprocess(comp, {0:1})\n >>> availablefunc = lambda x: (x[0] or x[1]) and (x[2] or x[3])\n >>> process.value(100, statefunc=availablefunc)\n 0.98197017562069511\n\n So, at :math:`t = 100`, the probability for the system to be\n available is approximaltly 0.982.\n\n If you want to know, the probability, at :math:`t = 1000` that all\n the components work but the first one, you proceed like that\n\n >>> allbutfirstfunc = lambda x: not x[0] and x[1] and x[2] and x[3]\n >>> allbutfirststates = process.computestates(allbutfirstfunc)\n >>> process.value(1000, states=allbutfirststates)\n 0.031471429479129759\n \"\"\"\n v = self.initstates.dot(expm(t*self.matrix))\n if not statefunc:\n return v\n else:\n try:\n states = self._states[statefunc]\n except KeyError:\n states = self._computestates(statefunc)\n self._states[statefunc] = states\n\n return v[(states, )].sum()\n\n def draw(self, output=None):\n r\"\"\" Print the content of the dot file needed to draw the markov process\n\n Parameters\n ----------\n output : file-like object, optional\n If `output` is given, then the content is written into this\n file. 
`output` *must* have a :py:meth:`write` method.\n\n Notes\n -----\n Please, see the `Graphviz <http://graphviz.org/>`_ website to have\n more information about how to transform the ouput code into a nice\n picture.\n \"\"\"\n def binstr(x, N):\n \"\"\" Convert `x` to its binary reprensation over N bits\n\n >>> bin(2, 4)\n '0010'\n \"\"\"\n return ''.join([i for i in binary_repr(x, N)])\n\n N = len(self.components)\n nsquared = 2**N\n data = ['digraph G {', '\\trankdir=LR;']\n for i in range(nsquared):\n bini = binstr(nsquared - 1 - i, N)\n for j in range(i, nsquared):\n if not self.matrix[i, j]:\n continue\n\n if i == j:\n data.append('%s -> %s [label = \"%s\"]'\n % (bini, bini, 1.0 + self.matrix[i, j]))\n else:\n binj = binstr(nsquared - 1 - j, N)\n data.append('%s -> %s [label = \"%s\"]'\n % (bini, binj, self.matrix[i, j]))\n data.append('%s -> %s [label = \"%s\"]'\n % (binj, bini, self.matrix[j, i]))\n data.append('}')\n if not output:\n print('\\n'.join(data))\n else:\n try:\n output.write('\\n'.join(data) + '\\n')\n except AttributeError:\n with open(output, 'w') as fobj:\n fobj.write('\\n'.join(data) + '\\n')\n" }, { "alpha_fraction": 0.6840579509735107, "alphanum_fraction": 0.6855072379112244, "avg_line_length": 26.039215087890625, "blob_id": "f8f139b4e932f98908e3618f3e7b6dd35e36fd67", "content_id": "73b29f110014465e876a5c99026d2386695f8974", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 1382, "license_type": "no_license", "max_line_length": 78, "num_lines": 51, "path": "/documentation/source/installation.rst", "repo_name": "crubier/Fiabilipy", "src_encoding": "UTF-8", "text": "Installing / Upgrading\n======================\n.. highlight:: bash\n\n**fiabilipy** is in the `Python Package Index\n<https://pypi.python.org/pypi/fiabilipy/>`_, so it should be quite easy to\ninstall it. Multiple solutions are offered to you.\n\nThe main dependencies of fiabilipy are `numpy and scipy\n<http://www.scipy.org/install.html>`_ and should be installed before.\n\nInstalling with pip\n-------------------\n\nTo install fiabilipy::\n\n $ pip install fiabilipy\n\nTo get a specific version::\n\n $ pip install fiabilipy==2.2\n\nTo upgrade to the last version::\n\n $ pip install --upgrade fiabilipy\n\n\nInstalling on Archlinux\n-----------------------\n\nIf you are using the `Archlinux <https://www.archlinux.org/>`_ GNU/Linux\ndistribution, an AUR package has been made. You can find it `here\n<https://aur.archlinux.org/packages/fiabilipy/>`_. The major interest of using\nthis package, is that it gets upgraded whenever a new version is available and\nyou don’t have to manage dependencies.\n\nInstalling on Windows\n---------------------\n\n.. 
todo:: write few lines on the windows installation.\n\n\nInstalling from source\n----------------------\n\nIf you would rather install directly from the source (to contribute, for\ninstance), check out for the latest source on our repository::\n\n $ hg clone https://chabotsi.no-ip.org/hg/utc/fiabilipy/\n $ cd fiabilipy\n $ python setup.py install\n\n" }, { "alpha_fraction": 0.49850785732269287, "alphanum_fraction": 0.545577883720398, "avg_line_length": 32.81651306152344, "blob_id": "ecdaa26e7750b72a4c10a26fae406a7ca5a99b83", "content_id": "f7863cb9f4252b9954ffa9561b99f61cf760e55e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7376, "license_type": "no_license", "max_line_length": 80, "num_lines": 218, "path": "/fiabilipy/voter.py", "repo_name": "crubier/Fiabilipy", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n#Copyright (C) 2013 Chabot Simon, Sadaoui Akim\n\n#This program is free software; you can redistribute it and/or modify\n#it under the terms of the GNU General Public License as published by\n#the Free Software Foundation; either version 2 of the License, or\n#(at your option) any later version.\n\n#This program is distributed in the hope that it will be useful,\n#but WITHOUT ANY WARRANTY; without even the implied warranty of\n#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n#GNU General Public License for more details.\n\n#You should have received a copy of the GNU General Public License along\n#with this program; if not, write to the Free Software Foundation, Inc.,\n#51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.\n\nr\"\"\" Voter design\n\nThis modude gives tools to design voters and compute some metrics, such as the\nreliability, the availability, the Mean-Time-To-Failure, and so on.\n\n\"\"\"\nfrom builtins import range\n\nfrom sympy import exp, Symbol, oo\nfrom scipy.special import binom\nfrom itertools import combinations, chain\n\nfrom fiabilipy.component import Component\n\n__all__ = ['Voter']\n\nALLSUBSETS = lambda n: (chain(*[combinations(list(range(n)), ni)\n for ni in range(n+1)]))\n\nclass Voter(Component):\n r\"\"\" A voter with identical components having a constant failure rate\n\n This class is used to describe a voter. 
A voter M out-of N works if\n and only if *at least* M components out of the N avaible work.\n\n Attributes\n ----------\n component: `Component`\n the component to be replicated by the voter\n N: int\n the initial number of components\n M: int\n the minimal number of working components\n lambda_ : float\n the constant failure rate of the voter\n mu : float, optional\n the constant maintainability rate of the voter\n initialy_avaible: boolean, optional\n whether the component is avaible at t=0 or not\n\n Examples\n --------\n >>> motor = Component('M', 1e-4, 3e-2)\n >>> voter = Voter(motor, 2, 3)\n >>> voter.mttf\n 8333.33333333333\n \"\"\"\n\n def __init__(self, component, M, N, lambda_=0, mu=0, initialy_avaible=True):\n name = '{} out-of {} − {}'.format(M, N, component.name)\n super(Voter, self).__init__(name=name, lambda_=lambda_, mu=mu,\n initialy_avaible=initialy_avaible)\n self.component = component\n self.M = M\n self.N = N\n\n def __repr__(self):\n return u'Voter(%s out-of %s)' % (self.M, self.N)\n\n def _probabilitiescomputation(self, t, method):\n \"\"\" Compute the `method` (reliability, availability, maintainability) of\n a voter, given its components, and the initial number of components\n and the minimal number of components.\n \"\"\"\n prob = 0\n for k in range(self.M, self.N+1):\n a = getattr(self.component, method)(t)**k\n b = (1 - getattr(self.component, method)(t))**(self.N-k)\n prob += binom(self.N, k) * a * b\n return prob\n\n def reliability(self, t):\n r\"\"\" Compute the reliability of the voter at `t`\n\n This method compute the reliability of the voter at `t`.\n\n Parameters\n ----------\n t : float or Symbol\n\n Returns\n -------\n out : float or symbolic expression\n The reliability calculated for the given `t`\n\n Examples\n --------\n >>> motor = Component('M', 1e-4, 3e-2)\n >>> voter = Voter(motor, 2, 3)\n >>> t = Symbol('t', positive=True)\n >>> voter.reliability(t)\n 3.0*(-exp(-0.0001*t) + 1)*exp(-0.0002*t) + 1.0*exp(-0.0003*t)\n >>> voter.reliability(1000)\n 0.974555817870510\n \"\"\"\n ownrel = super(Voter, self).reliability(t)\n return ownrel * self._probabilitiescomputation(t, 'reliability')\n\n def maintainability(self, t):\n r\"\"\" Compute the maintainability of the voter at `t`\n\n This method compute the maintainability of the voter at `t`.\n\n Parameters\n ----------\n t : float or Symbol\n\n Returns\n -------\n out : float or symbolic expression\n The maintainability calculated for the given `t`\n\n Examples\n --------\n >>> motor = Component('M', 1e-4, 3e-2)\n >>> voter = Voter(motor, 2, 3, mu=1e-3)\n >>> t = Symbol('t', positive=True)\n >>> voter.maintainability(t) #doctest: +NORMALIZE_WHITESPACE\n (1.0*(-exp(-0.03*t) + 1.0)**3 + 3.0*(-exp(-0.03*t)\n + 1.0)**2*exp(-0.03*t))*(-exp(-0.001*t) + 1.0)\n >>> voter.maintainability(1000)\n 0.632120558828558\n \"\"\"\n ownrel = super(Voter, self).maintainability(t)\n return ownrel * self._probabilitiescomputation(t, 'maintainability')\n\n def availability(self, t):\n r\"\"\" Compute the availability of the voter at `t`\n\n This method compute the availability of the voter at `t`.\n\n Parameters\n ----------\n t : float or Symbol\n\n Returns\n -------\n out : float or symbolic expression\n The availability calculated for the given `t`\n\n Examples\n --------\n >>> motor = Component('M', 1e-4, 3e-2)\n >>> voter = Voter(motor, 2, 3, mu=1e-3)\n >>> t = Symbol('t', positive=True)\n >>> voter.availability(t) #doctest: +NORMALIZE_WHITESPACE\n 3.0*(-0.00332225913621263*exp(-0.0301*t) +\n 
0.00332225913621265)*(0.00332225913621263*exp(-0.0301*t) +\n 0.996677740863787)**2 + 1.0*(0.00332225913621263*exp(-0.0301*t) +\n 0.996677740863787)**3\n >>> voter.availability(1000)\n 0.999966961120940\n \"\"\"\n ownavail = super(Voter, self).availability(t)\n return ownavail * self._probabilitiescomputation(t, 'availability')\n\n @property\n def mttf(self):\n r\"\"\" Compute the Mean-Time-To-Failure of the voter\n\n The MTTF is defined as :\n :math:`MTTF = \\int_{0}^{\\infty} R(t)dt`\n\n Returns\n -------\n out : float\n The component MTTF\n\n Examples\n --------\n >>> motor = Component('M', 1e-4, 3e-2)\n >>> voter = Voter(motor, 2, 3)\n >>> voter.mttf\n 8333.33333333333\n \"\"\"\n t = Symbol('t', positive=True)\n return self.reliability(t).integrate((t, 0, oo))\n\n @property\n def mttr(self):\n r\"\"\" Compute the Mean-Time-To-Repair of the voter\n\n The MTTR is defined as :\n :math:`MTTR = \\int_{0}^{\\infty} 1 - M(t)dt`\n\n Returns\n -------\n out : float\n The component MTTR\n\n Examples\n --------\n >>> motor = Component('M', 1e-4, 3e-2)\n >>> voter = Voter(motor, 2, 3, mu=1e-3)\n >>> voter.mttr\n 1000.57547188695\n \"\"\"\n t = Symbol('t', positive=True)\n return (1 - self.maintainability(t)).integrate((t, 0, oo))\n" }, { "alpha_fraction": 0.7216189503669739, "alphanum_fraction": 0.7216189503669739, "avg_line_length": 26.351350784301758, "blob_id": "7400f78f349d3812a041b790394344618c92c54c", "content_id": "9b46d6d3c9eff935886116fcf0bf9a272b00eb09", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 1013, "license_type": "no_license", "max_line_length": 79, "num_lines": 37, "path": "/documentation/source/tutorial/index.rst", "repo_name": "crubier/Fiabilipy", "src_encoding": "UTF-8", "text": "Tutorial\n========\n\nThis tutorial is intended as an introduction to fiabilipy. You will learn to\nbuild some components, to put them together and to compute some reliability\nmetrics. You will also learn how to use the markov representation.\n\nPrerequisites\n-------------\n\nBefore you start, be sure fiabilipy is well installed on your system. In the\npython shell, the following import should run without raising an exception:\n\n.. doctest::\n\n >>> import fiabilipy\n\nIf an exception is raised, check your :doc:`installation <../installation>`.\n\n\nTopics\n------\n\n:doc:`system`\n This tutorial shows you how to build components and how to gather them to\n build a system. You also learn how to access to useful reliability metrics.\n\n:doc:`markov`\n This tutorial shows you how to describe a system with a markov process.\n Then, it shows you how to compute the probability of being in a given state\n (such as *insufficient*, *damaged*, *nominal* and so on).\n\n.. 
toctree::\n :hidden:\n\n system\n markov\n\n" }, { "alpha_fraction": 0.6185185313224792, "alphanum_fraction": 0.6449735164642334, "avg_line_length": 31.033897399902344, "blob_id": "9dee296fb380980186a98a26f29f7165831b55dd", "content_id": "c56ab2139824ce277c2c2b7492cc30257e0bb112", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1893, "license_type": "no_license", "max_line_length": 82, "num_lines": 59, "path": "/examples/markov.py", "repo_name": "crubier/Fiabilipy", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n\n#Copyright (C) 2013 Chabot Simon, Sadaoui Akim\n\n#This program is free software; you can redistribute it and/or modify\n#it under the terms of the GNU General Public License as published by\n#the Free Software Foundation; either version 2 of the License, or\n#(at your option) any later version.\n\n#This program is distributed in the hope that it will be useful,\n#but WITHOUT ANY WARRANTY; without even the implied warranty of\n#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n#GNU General Public License for more details.\n\n#You should have received a copy of the GNU General Public License along\n#with this program; if not, write to the Free Software Foundation, Inc.,\n#51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.\n\nfrom __future__ import print_function\n\nimport pylab as p\n\nfrom fiabilipy import Component, Markovprocess\n\n\ndef markov_example():\n \"\"\" Describe the system as a Markov Process.\n states explanations :\n 0 : A and B working\n 1 : A working and B not working\n 2 : A not working and B working\n 3 : neither A nor B working\n \"\"\"\n A = Component('A', 1e-4, 1.1e-3)\n B = Component('B', 4e-4, 1.4e-3)\n\n components = (A, B)\n\n initstates = {0: 0.8, 1: 0.1, 2: 0.1}\n\n process = Markovprocess(components, initstates)\n\n timerange = range(0, 5000, 10)\n states = {u'nominal' : lambda x: all(x),\n u'dégradé' : lambda x: not(all(x)) and any(x), #at least one but all\n u'défaillant' : lambda x: not(x[0] or x[1]), #none\n u'disponible' : lambda x: any(x), #at least one\n }\n\n for name, state in states.iteritems():\n data = [process.value(t, statefunc=state) for t in timerange]\n p.plot(timerange, data, label=name)\n p.legend()\n p.show()\n\n\nif __name__ == '__main__':\n markov_example()\n" }, { "alpha_fraction": 0.614128053188324, "alphanum_fraction": 0.625165581703186, "avg_line_length": 26.289155960083008, "blob_id": "3b1125cf442f8202b4a9c75a54893373eb62fd17", "content_id": "3a502372da81bb7f7166472cc7fdcec77807a2a7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 2275, "license_type": "no_license", "max_line_length": 85, "num_lines": 83, "path": "/documentation/source/examples/voter_intersection.rst", "repo_name": "crubier/Fiabilipy", "src_encoding": "UTF-8", "text": "Voters intersection\n###################\n\nThe older the voters are, the less reliable they are. In their youngness, voters\nare more reliable than single components. The goal of this example is to find\nwhen a voter starts to be less reliable than a single component.\n\n\nStep by step\n------------\n\nLet’s start with a simple 2/3 voters. It’s really easy to get its reliability.\nFirst, we have to import the functions and classes we will use.\n\n.. doctest::\n\n >>> from fiabilipy import Voter, Component\n >>> from sympy import Symbol\n\nLet’s build the voter, with an unknown reliability :math:`\\lambda`.\n\n.. 
doctest::\n\n >>> l = Symbol('l', positive=True, null=False) #Lambda\n >>> t = Symbol('t', positive=True) #our time variable\n >>> comp = Component('C', l)\n >>> voter = Voter(comp, 2, 3)\n\nHere is the voter, now let’s get its reliability.\n\n.. doctest::\n\n >>> voter.reliability(t)\n 3.0*(1 - exp(-l*t))*exp(-2*l*t) + 1.0*exp(-3*l*t)\n\nTo have a polynomial expression, we substitute :math:`x` for :math:`\\exp(-\\lambda t)`.\nOnce more, this is easy in python…\n\n.. doctest::\n\n >>> from sympy import exp\n >>> x = Symbol('x')\n >>> voter.reliability(t).subs(exp(-l*t), x).nsimplify()\n x**3 + 3*x**2*(-x +1)\n\nUsing this notation, the reliability of a single component is :math:`x`. So to\nfind when the given voter is equivalent to the single component, we simply have\nto solve :math:`x^3 + 3x^2(-x + 1) - x = 0`.\n\n.. doctest::\n\n >>> from sympy import solve\n >>> crossing = (voter.reliability(t) - comp.reliability(t)).nsimplify()\n >>> solve(crossing.subs(exp(-l*t), x))\n [0, 1/2, 1]\n\nAnd the task is done.\n\nThe complete code\n-----------------\n\nThe code below gives a generic function to solve this problem.\n\n.. code:: python\n\n\n from fiabilipy import Voter, Component\n from sympy import Symbol, solve, exp\n\n def voterintersection(M, N):\n assert 1 < M < N, 'the given voter is not real'\n\n l = Symbol('l', positive=True, null=False)\n t = Symbol('t', positive=True)\n x = Symbol('x')\n\n comp = Component('C', l)\n voter = Voter(comp, M, N)\n\n crossing = (voter.reliability(t) - comp.reliability(t)).nsimplify()\n roots = solve(crossing.subs(exp(-l*t), x))\n\n return roots\n" }, { "alpha_fraction": 0.5760248899459839, "alphanum_fraction": 0.6206538677215576, "avg_line_length": 24.523178100585938, "blob_id": "a01c43cf0e3c087cfd774e02197f02beba213bfe", "content_id": "a60e06956832186f8b477e22f4241e454e489597", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 3872, "license_type": "no_license", "max_line_length": 127, "num_lines": 151, "path": "/documentation/source/tutorial/system.rst", "repo_name": "crubier/Fiabilipy", "src_encoding": "UTF-8", "text": "How to build a system\n=====================\n\nA system is built by putting components together. So, let’s have a look at how\nto build components.\n\nBuilding components\n-------------------\n\nA component is defined as an instance of the\n:class:`~fiabilipy.system.Component` class having a *constant* reliability rate,\nlet’s say :math:`\\lambda = 10^{-4}h^{-1}`.\n\n.. doctest::\n\n >>> from fiabilipy import Component\n >>> from sympy import Symbol\n >>> t = Symbol('t', positive=True)\n >>> comp = Component('C0', 1e-4)\n\nYou have successfully created your first component. `C0` is the name of the\ncomponent; naming your components will be useful to draw diagrams later.\n\nYou can access some useful information about your component, such as the\n:abbr:`MTTF (Mean-Time-To-Failure)`, the reliability, etc.\n\n.. doctest::\n\n >>> comp.mttf\n 10000.0\n >>> comp.reliability(1000)\n 0.904837418035960\n >>> comp.reliability(t)\n exp(-0.0001*t)\n >>> comp.reliability(t=100)\n 0.990049833749168\n\n\nGather components to build a system\n-----------------------------------\n\nNow that you can build components, let’s gather them to build a system. A system is\ndescribed as a graph of components. There are two special components used to\nmaterialize the entry and the exit of the system: `E` and `S`. 
They are\ncompulsory, don’t forget them.\n\nFor instance, you could create a simple series system of two components as\nfollows:\n\n.. doctest::\n\n >>> from fiabilipy import System\n >>> power = Component('P0', 1e-6)\n >>> motor = Component('M0', 1e-3)\n >>> S = System()\n >>> S['E'] = [power]\n >>> S[power] = [motor]\n >>> S[motor] = 'S'\n\nOnce your system is created, you can access the data you want, such as\n:abbr:`MTTF (Mean-Time-To-Failure)`, reliability, etc.\n\n.. doctest::\n\n >>> S.mttf\n 1000000/1001 \n >>> float(S.mttf)\n 999.000999000999\n >>> S.reliability(t)\n exp(-1001*t/1000000)\n\nAn example of a complex system\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\nLet’s build the following system:\n\n.. figure:: images/complex_system.png\n :align: center\n\n An example of a _complex_ system\n\n\n.. doctest::\n\n >>> a, b, c, d, e, f, g = [Component('C%i' % i, 1e-4) for i in xrange(7)]\n >>> S = System()\n >>> S['E'] = [a, b, c, g]\n >>> S[a] = S[g] = S[e] = S[d] = 'S'\n >>> S[b] = S[c] = [f]\n >>> S[f] = [e, d]\n\n\nAnd you can easily access the data you want, as previously.\n\n.. doctest::\n\n >>> S.mttf\n 331000/21\n >>> S.reliability(t)\n 13*exp(-t/2000) - 12*exp(-t/2500) - exp(-t/5000) - 6*exp(-3*t/5000) + 2*exp(-t/10000) + 4*exp(-3*t/10000) + exp(-7*t/10000)\n\n\nAs you may see, even if the system is complex, it is quite easy to describe it\nwith fiabilipy.\n\nDraw graphics\n-------------\n\nNow that you know how to build systems with ease, let’s draw some graphics. For\ninstance, reliability versus time.\n\nThe first thing to do is to import the :py:mod:`pylab` module, which provides a\nlot of functions to do mathematical stuff *and* to draw graphics:\n\n.. doctest::\n\n >>> import pylab as p\n\n\nNow, let’s define a simple parallel system with two components.\n\n.. doctest::\n\n >>> a, b = Component('a', 1e-4), Component('b', 1e-6)\n >>> S = System()\n >>> S['E'] = [a, b]\n >>> S[a] = S[b] = 'S'\n\nIn order to draw the graphic, we need a time range of study, for instance, from\n:math:`t = 0` to :math:`t = 20000`, by steps of :math:`100` (the unit of time is the\none you choose). Once the time range is defined, we compute the reliability at\neach time step:\n\n.. doctest::\n\n >>> timerange = range(0, 20000, 100)\n >>> reliability = [S.reliability(t) for t in timerange]\n\nTo finish, you only have to plot:\n\n.. doctest::\n\n >>> p.plot(timerange, reliability) # doctest: +SKIP\n >>> p.show()\n\nYou can admire the result.\n\n.. figure:: images/reliability_vs_time.png\n :align: center\n\n The reliability graphic\n" }, { "alpha_fraction": 0.4732142984867096, "alphanum_fraction": 0.4732142984867096, "avg_line_length": 21.399999618530273, "blob_id": "5183cbd6e0b34f5cbab155c7143b0aac7b0cf974", "content_id": "bb55541c181ef3886dd7bf301b747c23d25fd9ac", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 112, "license_type": "no_license", "max_line_length": 32, "num_lines": 5, "path": "/documentation/source/api/system/voter.rst", "repo_name": "crubier/Fiabilipy", "src_encoding": "UTF-8", "text": ":class:`Voter` -- Voter building\n================================\n\n.. 
autoclass:: fiabilipy.Voter\n :members:\n" }, { "alpha_fraction": 0.614157497882843, "alphanum_fraction": 0.6368893384933472, "avg_line_length": 29.029939651489258, "blob_id": "6bdc6b2fc9b5cc543d0877040c10ca504efd6b33", "content_id": "6558621bc373165fb73f59fe8ffa6d571ee5dfcc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 5044, "license_type": "no_license", "max_line_length": 82, "num_lines": 167, "path": "/documentation/source/tutorial/markov.rst", "repo_name": "crubier/Fiabilipy", "src_encoding": "UTF-8", "text": "How to describe a system by a Markov process\n============================================\n\nfiabilipy enables the users to model a system through a Markov process. This\ntutorial aims to introduce how fiabilipy handles Markov modelling. This\nintroduction is done using an example.\n\nLet’s say we have a parallel-series system, represented by the following figure.\n\n.. figure:: images/markov_system.png\n :align: center\n\n A parallel-series system\n\nWe are interested in the probabilities for the system to be:\n\n* in normal state (i.e. every component works)\n* in damaged state (i.e. one or more components don’t work, but the system does\n work)\n* in faulty state (i.e. the system doesn’t work at all)\n* in available state (i.e. the system is not faulty)\n\nComponent and process initialisation\n------------------------------------\n\nTo begin, let’s start building the components:\n\n.. doctest::\n\n >>> from fiabilipy import Component, Markovprocess\n >>> A0, A1 = [Component('A{}'.format(i), 1e-4, 1.1e-3) for i in xrange(2)]\n >>> M0, M1 = [Component('M{}'.format(i), 1e-3, 1.2e-2) for i in xrange(2)]\n >>> components = (A0, A1, M0, M1)\n\nTo initialize the process, we need to give fiabilipy the list of the\ncomponents and the initial state probabilities. A state is defined by a number\nwhich, in base 2, says whether the ith component is working or not. So, with 4\ncomponents, we have :math:`2^4 = 16` possible states. The following table\nrepresents the possible states (W stands for *working* and N for *not working*).\n\n.. table:: states table\n\n ===== == == == ==\n state A0 A1 M0 M1\n ===== == == == ==\n 0 W W W W\n 1 W W W N\n 2 W W N W\n 3 W W N N\n … … … … …\n 15 N N N N\n ===== == == == ==\n\nNow, let’s assume that, at :math:`t = 0`, the probability of the system being\nin state 0 is 0.9 and in state 1 is 0.1, thus we have:\n\n.. doctest::\n\n >>> initstates = {0: 0.9, 1:0.1}\n >>> process = Markovprocess(components, initstates)\n\n\nWorking states definition\n-------------------------\n\nNow that we have initialized our Markov process, we have to define the states to be\ntracked. This is done by writing a function which returns `True` if the given\nstate is tracked, `False` otherwise. The functions will be called with one\nargument, let’s say `x`. This variable is a boolean array, whose ith entry is\n`True` if the ith component is currently working, `False` otherwise.\n\nOk, let’s define the states we want to track.\n\nNormal state\n~~~~~~~~~~~~\n\nIn this state, *every* component has to work. So, in python it can be written\nas:\n\n.. doctest::\n\n >>> def normal(x):\n ... return all(x)\n\nThis function returns `True` if every single item of `x` is `True`.\n\nAvailable state\n~~~~~~~~~~~~~~~\n\nIn this state, the system is available. So, there exists a path of working\ncomponents from `E` to `S`. That is to say either A0 or A1\nare working and M0 or M1 are. 
So, the function describing the available state may be:\n\n.. doctest::\n\n >>> def available(x):\n ... return (x[0] or x[1]) and (x[2] or x[3])\n\nDamaged state\n~~~~~~~~~~~~~\n\nActually, once you have described what the available state is, you have done the\nhard part, because the other states can be described as combinations of it.\nFor instance, the system is damaged when the system is in the available state *and\nnot* in the normal state. Therefore:\n\n.. doctest::\n\n >>> def damaged(x):\n ... return available(x) and not(normal(x))\n\nFaulty state\n~~~~~~~~~~~~\n\nThe system is faulty when not available. So, it’s quite simple to describe:\n\n.. doctest::\n\n >>> def faulty(x):\n ... return not available(x)\n\nCompute the probabilities\n-------------------------\n\nNow that you have written the functions describing the states, it is really simple to\nask fiabilipy for the probabilities you want. For instance, to know the probability\nof the system being available at :math:`t = 150h`, simply write:\n\n.. doctest::\n\n >>> process.value(150, available)\n 0.97430814090407503\n\nAt :math:`t = 1000h`, the probability that every component is still working is:\n\n.. doctest::\n\n >>> process.value(1000, normal)\n 0.30900340684254302\n\nDrawing plots\n~~~~~~~~~~~~~\n\nNow that you are able to compute the probabilities you want, for the states you want,\nfor the time you want, let’s plot those probabilities. The following code gives\nyou an example of how to plot the variation of the probabilities.\n\n.. doctest::\n\n >>> import pylab as p\n >>> states = {u'normal': normal,\n ... u'available': available,\n ... u'damaged': damaged,\n ... u'faulty': faulty,\n ... }\n >>> timerange = range(0, 6000, 10)\n >>> for (name, func) in states.iteritems():\n ... proba = [process.value(t, func) for t in timerange]\n ... p.plot(timerange, proba, label=name)\n >>> p.legend()\n >>> p.show()\n\n\nAnd this code gives you the following figure:\n\n.. 
figure:: images/markov_proba.png\n :align: center\n" }, { "alpha_fraction": 0.6581858396530151, "alphanum_fraction": 0.7654867172241211, "avg_line_length": 35.15999984741211, "blob_id": "44071da9a754db83c6f97c5ab0f646d9df563230", "content_id": "44e2bc55ee2ea29f987c9b9c30af15e32c8cfade", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 904, "license_type": "no_license", "max_line_length": 143, "num_lines": 25, "path": "/package/PKGBUILD", "repo_name": "crubier/Fiabilipy", "src_encoding": "UTF-8", "text": "# Maintainer: Simon Chabot <simon dot chabot at fiabilipy dot org>\n# Maintainer: Akim Sadaoui <akim dot sadaoui at fiabilipy dot org>\npkgname=fiabilipy\npkgver=2.7\npkgrel=1\npkgdesc=\"A reliability engineering software for educational purposes\"\narch=('any')\nurl=\"http://fiabilipy.org\"\nlicense=('GPL')\ndepends=('python2-scipy' 'python2-numpy' 'python2-sympy' 'python2-networkx'\n 'python2-decorator')\noptdepends=('graphviz: draw the reliability diagrams'\n 'python2-matplotlib: plot the computed probabilities')\nprovides=('fiabilipy=2.7')\nchangelog=ChangeLog\nsource=(https://pypi.python.org/packages/source/f/$pkgname/$pkgname-$pkgver.tar.gz)\nsha512sums=('dc7419fe639b951688535baeea325d0b11a313ff909fa346c992decef8fa8ba10d7534345fd194c23fd12dcd066b4ac8d3465d75255b3a62c4ac11df13887144')\n\n\npackage() {\n\tcd \"$srcdir/$pkgname-$pkgver\"\n python2 setup.py install --root=$pkgdir\n}\n\n# vim:set ts=2 sw=2 et:\n" }, { "alpha_fraction": 0.6572279930114746, "alphanum_fraction": 0.6684053540229797, "avg_line_length": 21.74576187133789, "blob_id": "f4fb110a30795282d0cf88568bfe35e07d2277ef", "content_id": "2d252ef7b543e2e0e697349fdb4d64b154428a89", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 1344, "license_type": "no_license", "max_line_length": 78, "num_lines": 59, "path": "/documentation/source/changelog.rst", "repo_name": "crubier/Fiabilipy", "src_encoding": "UTF-8", "text": "ChangeLog\n=========\n\nV2.4\n----\n\n* A better cache system.\n* A documentation in English, using `Sphinx <http://sphinx.pocoo.org/>`_.\n* Add some metrics computation. One can compute maintainability, and therefore\n availability, for complex systems and voters.\n* An Archlinux `package <https://aur.archlinux.org/packages/fiabilipy/>`_ was\n made.\n* The code has been cleaned. (thank you `pylint <http://pylint.org/>`_ ;))\n* Symbolic computation can be performed, using :mod:`sympy`\n\n\n.. 
doctest::\n\n >>> from sympy import symbols\n >>> from fiabilipy import Component\n >>> l, t = symbols('l t', positive=True)\n >>> a = Component('a', lambda_=l)\n >>> a.reliability(t)\n exp(-l*t)\n\n\nV2.3.1\n------\n\n* Update the module name from :mod:`fiabili` to :mod:`fiabilipy`.\n\nV2.3\n----\n\n* Create a `pypi package <https://pypi.python.org/pypi/fiabilipy/>`_.\n* Update the documentation.\n* A Markov graph can be drawn.\n\n\nV2.2\n----\n\n* Add a :mod:`markov` module.\n\nV0.2\n----\n\n* Some metrics are cached to speed up the computation (:abbr:`MTTF\n (Mean-Time-To-Failure)`, :abbr:`MTTR (Mean-Time-To-Repair)`, etc).\n* A documentation is started (in French… sorry).\n\nV0.1\n----\n\n* *Show time*.\n* System, Component and voter can be built.\n* Reliability can be computed.\n* :abbr:`MTTF (Mean-Time-To-Failure)` can be computed.\n* Compute the minimal cuts of order 1 and 2.\n" }, { "alpha_fraction": 0.6866840720176697, "alphanum_fraction": 0.7349869608879089, "avg_line_length": 30.639999389648438, "blob_id": "7f8d5cfb65f267465c47f07db2f252e4c5cc83", "content_id": "c7ce19c0836d08be5619b3f3a50301eca89a3b83", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1534, "license_type": "no_license", "max_line_length": 103, "num_lines": 50, "path": "/CHANGELOG.md", "repo_name": "crubier/Fiabilipy", "src_encoding": "UTF-8", "text": "2016-08-27 Vincent Lecrubier <vincent dot lecrubier at gmail dot com>\n\n\t* 3.0 :\n\tPort to python3\n\tnetworkx representation is based on names of components instead of component instances themselves\n addition of the `_map` attribute on `systems` in order to map component names to component themselves\n\n\n2013-10-08 Akim Sadaoui <akim dot sadaoui at etu dot utc dot fr>\n\n\t* 2.4 :\n\tA better cache system.\n\tA documentation in English, using Sphinx (http://sphinx.pocoo.org/).\n\tAdd some metrics computation. One can compute maintainability, and therefore\n\tavailability, for complex systems and voters.\n\tFirst release as Archlinux package.\n\tThe code has been cleaned. 
(thank you pylint ;))\n\tSymbolic computation can now be performed, using sympy.\n\n2013-10-08 Simon Chabot <simon dot chabot at chabotsi dot fr>\n\n\t* 2.3.1 :\n\tUpdate the module name from fiabili to fiabilipy.\n\n2013-10-08 Simon Chabot <simon dot chabot at chabotsi dot fr>\n\n\t* 2.3 :\n\tCreate a pypi package (https://pypi.python.org/pypi/fiabilipy/).\n\tUpdate the documentation.\n\tA Markov graph can be drawn.\n\n2013-10-08 Simon Chabot <simon dot chabot at chabotsi dot fr>\n\n\t* 2.2 :\n\tAdd a markov module.\n\n2013-10-08 Simon Chabot <simon dot chabot at chabotsi dot fr>\n\n\t* 0.2 :\n\tSome metrics are cached to make the computation faster (MTTF, MTTR, etc).\n\tA documentation is started (in French… sorry).\n\n2013-10-08 Simon Chabot <simon dot chabot at chabotsi dot fr>\n\n\t* 0.1 :\n\t*Show time*.\n\tSystem, Component and voter can be built.\n\tReliability can be computed.\n\tMTTF can be computed.\n\tCompute the minimal cuts of order 1 and 2.\n" }, { "alpha_fraction": 0.4887218177318573, "alphanum_fraction": 0.4887218177318573, "avg_line_length": 25.399999618530273, "blob_id": "dcee1df675a7533e5ba1237722b425f3b29d8f76", "content_id": "82ba1c93e6e5e8d85e177fda6fa08d35327ab5a3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 133, "license_type": "no_license", "max_line_length": 40, "num_lines": 5, "path": "/documentation/source/api/system/component.rst", "repo_name": "crubier/Fiabilipy", "src_encoding": "UTF-8", "text": ":class:`Component` -- Component building\n========================================\n\n.. autoclass:: fiabilipy.Component\n :members:\n\n" }, { "alpha_fraction": 0.7014435529708862, "alphanum_fraction": 0.7093175649642944, "avg_line_length": 24.830509185791016, "blob_id": "d47d9bafc352349a2ee6d38982503a5259bcc740", "content_id": "8ff79cd56c58be6c6d9ad51feb276b2cc012a221", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 1528, "license_type": "no_license", "max_line_length": 79, "num_lines": 59, "path": "/documentation/source/index.rst", "repo_name": "crubier/Fiabilipy", "src_encoding": "UTF-8", "text": ".. fiabilipy documentation master file, created by\n sphinx-quickstart2 on Wed Oct 2 10:36:13 2013.\n You can adapt this file completely to your liking, but it should at least\n contain the root `toctree` directive.\n\nWelcome to fiabilipy's documentation!\n=====================================\n\nOverview\n--------\n\n**fiabilipy** is a python package providing some functions to learn engineering\nreliability at university. 
With this package, one can easily build some\ncomponents, put them together to build a complete system and finally evaluate\nsome metrics (reliability, maintainability, Mean-Time-To-Failure, and so on).\n\n**fiabilipy** also provides tools to describe a system by a Markov process and\nevaluate the probability of being in a given state.\n\n:doc:`installation`\n Instructions on how to get fiabilipy\n\n:doc:`tutorial/index`\n Start here to learn how to make your first system.\n\n:doc:`examples/index`\n Some examples on how to perform specific tasks.\n\n:doc:`api/index`\n The complete API documentation, organized by modules.\n\nContributing\n------------\n\nThis software is free - as in speech - therefore, all contributions are\nencouraged, from minor tweaks to pull requests or just a little message ;)\n\nChanges\n-------\n\nSee the :doc:`changelog` for a full list of changes offered by each version.\n\n\nIndices and tables\n------------------\n\n* :ref:`genindex`\n* :ref:`modindex`\n* :ref:`search`\n\n\n.. toctree::\n :hidden:\n\n installation\n tutorial/index\n examples/index\n api/index\n changelog\n" }, { "alpha_fraction": 0.5845771431922913, "alphanum_fraction": 0.5920398235321045, "avg_line_length": 33.956520080566406, "blob_id": "74c2dc6e20e13b92c086bea9267892d1883db346", "content_id": "5b389b1a6cb44615dc49aa328475e64462586429", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 804, "license_type": "no_license", "max_line_length": 87, "num_lines": 23, "path": "/setup.py", "repo_name": "crubier/Fiabilipy", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n\nfrom distutils.core import setup\n\nsetup(name='fiabilipy',\n version='3.0',\n description='Learn engineering reliability with python',\n long_description=open('README.md').read(),\n author='Simon Chabot, Akim Sadaoui, Vincent Lecrubier',\n author_email='[email protected]',\n url='http://fiabilipy.org',\n license='GPLv2+',\n keywords=('dependability', 'availability', 'reliability', 'markov'),\n requires=['numpy', 'scipy', 'sympy', 'networkx','future'],\n packages=['fiabilipy'],\n classifiers=[\n 'Development Status :: 3 - Alpha',\n 'Environment :: Console',\n 'Intended Audience :: Education',\n 'License :: OSI Approved :: GNU General Public License v2 or later (GPLv2+)',\n 'Topic :: Scientific/Engineering',\n ]\n )\n" }, { "alpha_fraction": 0.5756374001502991, "alphanum_fraction": 0.6050991415977478, "avg_line_length": 30.51785659790039, "blob_id": "0db4228a78b922751ec0de5da6f2e60c66308c7b", "content_id": "85a043c8aff8ad48f88f4df4116aefbb60a68ad4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1766, "license_type": "no_license", "max_line_length": 72, "num_lines": 56, "path": "/examples/system.py", "repo_name": "crubier/Fiabilipy", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n\n#Copyright (C) 2013 Chabot Simon, Sadaoui Akim\n\n#This program is free software; you can redistribute it and/or modify\n#it under the terms of the GNU General Public License as published by\n#the Free Software Foundation; either version 2 of the License, or\n#(at your option) any later version.\n\n#This program is distributed in the hope that it will be useful,\n#but WITHOUT ANY WARRANTY; without even the implied warranty of\n#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\n#GNU General Public License for more details.\n\n#You should have received a copy of the GNU General Public License along\n#with this program; if not, write to the Free Software Foundation, Inc.,\n#51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.\n\nfrom __future__ import print_function\n\nfrom fiabilipy import Component, System\nfrom matplotlib.pylab import plot, show\n\n\ndef system_example():\n r\"\"\" Describe the system as a reliability block diagram\n\n | -- A0 -- | -- M0 -- |\n | / |\n E -- | -- A1 -- | | -- S\n | \\ |\n | -- A2 -- | -- M1 -- |\n \"\"\"\n\n alim = [Component('A_%s' % i, 2e-4) for i in xrange(3)]\n motors = [Component('M_%s' % i, 1e-4) for i in xrange(2)]\n S = System()\n\n S['E'] = [alim[0], alim[1], alim[2]]\n S[alim[0]] = [motors[0]]\n S[alim[1]] = [motors[0], motors[1]]\n S[alim[2]] = [motors[1]]\n S[motors[0]] = 'S'\n S[motors[1]] = 'S'\n\n print('The MTTF of the system is :', S.mttf)\n\n timerange = range(0, 2*365*24, 100) # 2 years study\n reliability = [S.reliability(t) for t in timerange]\n plot(timerange, reliability)\n show()\n\n\nif __name__ == '__main__':\n system_example()\n" }, { "alpha_fraction": 0.4879518151283264, "alphanum_fraction": 0.4879518151283264, "avg_line_length": 32.20000076293945, "blob_id": "03adffdf556ffef2583dfce1c0a56ba7861b27fc", "content_id": "c1c563d3ade4a1e90abd79b65d99ef1dbf80a3ca", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 166, "license_type": "no_license", "max_line_length": 55, "num_lines": 5, "path": "/documentation/source/api/markov/index.rst", "repo_name": "crubier/Fiabilipy", "src_encoding": "UTF-8", "text": ":mod:`markov` -- Markov system building and description\n=======================================================\n\n.. autoclass:: fiabilipy.Markovprocess\n :members:\n" }, { "alpha_fraction": 0.4363636374473572, "alphanum_fraction": 0.4424242377281189, "avg_line_length": 17.33333396911621, "blob_id": "c11880d392eb2be95d7f8c4810b4c7e56a1f3c66", "content_id": "323e84389f72bcf0e3e8a8fac20d451a68da8f34", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 165, "license_type": "no_license", "max_line_length": 48, "num_lines": 9, "path": "/documentation/source/api/system/index.rst", "repo_name": "crubier/Fiabilipy", "src_encoding": "UTF-8", "text": ":mod:`system` -- System building and description\n================================================\n\n.. toctree::\n :maxdepth: 2\n\n component\n voter\n system\n" }, { "alpha_fraction": 0.7740259766578674, "alphanum_fraction": 0.7753247022628784, "avg_line_length": 31.08333396911621, "blob_id": "03e98aa0c410b8578d69ab6b5ef4c6740545c3fa", "content_id": "80a793aa1336d97d18194a4360e21523990abe41", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 770, "license_type": "no_license", "max_line_length": 79, "num_lines": 24, "path": "/README.md", "repo_name": "crubier/Fiabilipy", "src_encoding": "UTF-8", "text": "# Fiabilipy\n\nPort to python 3 by [crubier](http://crubier.net)\n\n## Overview\n\n**fiabilipy** is a python package providing some functions to learn engineering\nreliability at university. 
With this package, one can easily build some\ncomponents, put them together to build a complete system and finally evaluate\nsome metrics (reliability, maintainability, Mean-Time-To-Failure, and so on).\n\n**fiabilipy** also provides tools to describe a system by a Markov process and\nevaluate the probability of being in a given state.\n\n## Contributing\n\n\nThis software is free - as in speech - therefore, all contributions are\nencouraged, from minor tweaks to pull requests or just a little message ;)\n\n## Changes\n\n\nSee `CHANGELOG.md` for a full list of changes offered by each version.\n" } ]
27
catli/student-lesson-optimizer
https://github.com/catli/student-lesson-optimizer
caf288ef31b1650c22e4178b18618801d1c0b0df
1e6b052d536e000423f498d83b66204df95f9d73
3fd355b33cdb78ea2697432baf82190e17e17b87
refs/heads/master
2020-04-30T03:57:32.570733
2019-04-16T18:33:50
2019-04-16T18:33:50
176,599,139
0
0
null
2019-03-19T21:12:50
2019-04-15T23:24:24
2019-04-16T18:33:51
Jupyter Notebook
[ { "alpha_fraction": 0.6230445504188538, "alphanum_fraction": 0.6291000843048096, "avg_line_length": 41.45000076293945, "blob_id": "c0a677197cde110eee59d8e3fa82f66b557e79e1", "content_id": "dcbfa1ef8b1fe356c71ff6895d6db7f5b49ea6e7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5945, "license_type": "no_license", "max_line_length": 86, "num_lines": 140, "path": "/model/evaluate.py", "repo_name": "catli/student-lesson-optimizer", "src_encoding": "UTF-8", "text": "\n'''\n To evaluate the loss of prediction on validation data\n'''\nimport torch\nimport torch.nn as nn\nfrom torch.autograd import Variable\nfrom torch.nn import functional as F\nimport torch.utils.data as Data\nimport numpy as np\nfrom process_data import split_train_and_test_data, convert_token_to_matrix\nimport random\nimport csv\nimport pdb\n\n\n\ndef evaluate_loss(model, val_data, loader, val_keys, content_dim):\n '''\n # output_sample_filename, epoch, exercise_to_index_map, \n # perc_sample_print, ):\n set in training node\n perc_sample_print = 0.05 # set the percent sample\n '''\n model.eval()\n val_loss = []\n total_predicted = 0\n total_label = 0\n total_correct = 0\n total_no_predicted = 0\n total_sessions = 0\n for step, batch_x in enumerate(loader): # batch_x: index of batch data\n print('Evaluate Loss | Iteration: ', step+1)\n # convert token data to matrix\n # need to convert batch_x from tensor flow object to numpy array\n # before converting to matrix\n\n input_padded, label_padded, label_mask, seq_lens = convert_token_to_matrix(\n batch_x[0].numpy(), val_data, val_keys, content_dim)\n # Variable, used to set tensor, but no longer necessary\n # Autograd automatically supports tensor with requires_grade=True\n # https://pytorch.org/docs/stable/autograd.html?highlight=autograd%20variable\n padded_input = Variable(torch.Tensor(\n input_padded), requires_grad=False) # .cuda()\n padded_label = Variable(torch.Tensor(\n label_padded), requires_grad=False) # .cuda()\n padded_mask = Variable(torch.Tensor(\n label_mask), requires_grad=False) # .cuda()\n\n # clear gradients and hidden state\n model.hidden = model.init_hidden()\n # is this equivalent to generating prediction\n # what is the label generated?\n\n y_pred = model(padded_input, seq_lens) # .cuda()\n loss = model.loss(y_pred, padded_label, padded_mask) # .cuda()\n # append the loss after converting back to numpy object from tensor\n val_loss.append(loss.data.numpy())\n threshold_output, correct_ones = find_max_predictions(\n y_pred, label_mask, input_padded, content_dim) # .cuda()\n total_predicted += len(torch.nonzero(threshold_output))\n total_label += len(torch.nonzero(padded_label))\n total_correct += len(torch.nonzero(correct_ones))\n # total_no_predicted += num_no_pred\n total_sessions += np.sum(seq_lens)\n \n average_loss = np.mean(val_loss)\n # of label=1 that were predicted accurately\n return average_loss, total_predicted, total_label, \\\n total_correct, total_sessions\n\n\ndef mask_padded_errors(threshold_output, seq_lens):\n # [TODO SLO]: \n # (1) move mask_padded_error to loss\n num_no_pred = 0\n for i, output in enumerate(threshold_output):\n # the full size of threshold\n num_sess = threshold_output[i].shape[0]\n seq_len = seq_lens[i]\n # calculate the number of sessions with no prediction\n sess_with_pred = np.sum(\n threshold_output[i][:seq_len, ].detach().numpy(),\n axis=1)\n num_no_pred += int(np.sum(sess_with_pred == 0))\n threshold_output[i][:seq_len, ]\n for sess_i in range(seq_len, 
num_sess):\n threshold_output[i][sess_i] = 0\n return threshold_output, num_no_pred\n\n\n\n\ndef find_max_predictions(output, label, input_padded, content_dim):\n '''\n for each session, select k matching content where k is\n the number of content the student actual completed\n the k matching content is based on predict score growth\n\n once generated, compare the predicted and the actual content\n and return location of correct predictions\n '''\n rel_thresh_output = torch.zeros(output.shape)\n for stud, _ in enumerate(output):\n # init total correct and total answer, sum up from input_padded\n num_corrects = np.zeros(content_dim)\n num_answers = np.zeros(content_dim)\n for sess, _ in enumerate(output[stud]):\n # add the number of correct answers and total answers\n # from the previous session (in input padded)\n num_answers += input_padded[stud, sess, :content_dim]\n # num correct = num_answers * perc_correct\n num_corrects += (input_padded[stud, sess, :content_dim]*\n input_padded[stud, sess, content_dim:])\n # number of predicted activity will match actual number completed\n # assume that students will complete the same number of activities\n # in this prediction scenario\n k = torch.sum(label[stud, sess]>0) # number of content completed\n if k==0:\n continue\n else:\n # create the denominator from num answers\n denom = num_answers.copy()\n denom[denom==0] = 1\n mastery = np.divide(num_corrects, denom)\n growth_vals = output[stud, sess].detach().numpy() - mastery\n # pick the threshold for k-th highest growth threshold\n rel_thresh = sorted(growth_vals)[-k] # threshold of content\n # if the output greater growth threshold, set to 1\n # otherwise, all other skills set to 0\n rel_thresh_output[stud, sess] = torch.tensor((\n growth_vals >=rel_thresh).astype('float'))\n # find the difference between label and prediction\n # where prediction is incorrect (label is one and\n # threshold output 0), then the difference would be 1\n predict_diff = label - rel_thresh_output\n # if label =1 and threshold output =0\n incorrect_ones = F.threshold(predict_diff, 0.999, 0)\n # if label = 1 and incorrect = 0\n correct_ones = label - incorrect_ones\n return rel_thresh_output, correct_ones\n\n" }, { "alpha_fraction": 0.6298397183418274, "alphanum_fraction": 0.6414303183555603, "avg_line_length": 38.75490188598633, "blob_id": "89c607b4df2d98b903ceb74c05674be98df1c7bd", "content_id": "191a833a132cc491971a82876f05310c76e72aa5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4055, "license_type": "no_license", "max_line_length": 96, "num_lines": 102, "path": "/model/unit_test.py", "repo_name": "catli/student-lesson-optimizer", "src_encoding": "UTF-8", "text": "# Unit test: run with pytest\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.utils.data as Data\nfrom gru import GRU_MODEL as gru_model\nfrom process_data import split_train_and_test_data, convert_token_to_matrix, extract_content_map\nfrom train import train\nfrom evaluate import evaluate_loss\nimport pdb\n\n\ndef test_train_and_evaluate():\n exercise_filename = 'data/fake_tokens'\n content_index_filename = 'data/exercise_index_all'\n train_keys, val_keys, full_data = split_train_and_test_data(\n exercise_filename, content_index_filename, 0)\n exercise_to_index_map, content_dim = extract_content_map(\n content_index_filename)\n input_dim = content_dim*2\n model = gru_model(input_dim=input_dim,\n output_dim=content_dim,\n nb_lstm_layers=1,\n nb_lstm_units=50,\n 
batch_size=1)\n optimizer = torch.optim.Adam(model.parameters(), lr=0.001)\n data_index = torch.IntTensor(range(len(train_keys)))\n torch_data_index = Data.TensorDataset(data_index)\n loader = Data.DataLoader(dataset=torch_data_index,\n batch_size=1,\n drop_last=True)\n train(model, optimizer, full_data, loader, train_keys, epoch = 1,\n content_dim = content_dim)\n eval_loss, total_predicted, total_label, total_correct, \\\n total_sessions = evaluate_loss(model, full_data,\n loader, train_keys, content_dim)\n epoch_result = 'Epoch %d unit test: %d / %d precision \\\n and %d / %d recall with %d sessions \\n' % (\n 1, total_correct, total_predicted,\n total_correct, total_label, total_sessions)\n print(epoch_result)\n assert model, \"UH OH\"\n print(\"PASS UNIT TEST\")\n\n\ndef test_train_split():\n # make sure the training validiton split\n # is working as expected\n exercise_filename = 'data/fake_tokens'\n content_index_filename = 'data/exercise_index_all'\n train_keys, val_keys, full_data = split_train_and_test_data(\n exercise_filename, content_index_filename, 0.2)\n assert len(train_keys) == 4\n assert len(val_keys) == 1\n print(\"PASS TRAIN VALIDATION SPLIT\")\n\n\ndef test_convert_token_to_matrix():\n # test a couple of things\n # (1) number of student in batch match expected\n # (2) the number of session for the first student match expected\n # (2) the number of activities for the first session match expected\n # (3) the perc correct match expected\n exercise_filename = 'data/fake_tokens'\n content_index_filename = 'data/exercise_index_all'\n train_keys, val_keys, full_data = split_train_and_test_data(\n exercise_filename, content_index_filename, 0)\n exercise_to_index_map, content_dim = extract_content_map(\n content_index_filename)\n\n batch = np.array([0,1])\n batch_train_keys = [key[0] for key in train_keys[0:2]]\n\n input_padded, label_padded, label_mask, seq_lens = convert_token_to_matrix(\n batch, full_data, train_keys, content_dim)\n\n assert len(input_padded) == len(batch)\n assert len(label_padded) == len(batch)\n assert len(label_mask) == len(batch)\n print(\"PASS BATCH NUM\")\n\n student_data = full_data[batch_train_keys[0]]\n assert len(input_padded[0,:,:]) == len(student_data)-1\n assert len(label_padded[0,:,:]) == len(student_data)-1\n assert len(label_mask[0,:,:]) == len(student_data)-1\n print(\"PASS SESS NUM\")\n\n student_data = full_data[batch_train_keys[0]]\n first_sesh_skills = np.unique([x[0] for x in student_data['1']])\n sec_sesh_skills = np.unique([x[0] for x in student_data['2']])\n assert np.sum(input_padded[0,0,:content_dim]>0) == len(first_sesh_skills)\n assert np.sum(label_padded[0,0,:]>0) == len(sec_sesh_skills)\n assert np.sum(label_mask[0,0,:]) == len(sec_sesh_skills)\n print(\"PASS SKILL NUM\")\n\n\n\n# if __name__ == '__main__':\n# # set hyper parameters\n# test_train_split()\n# test_convert_token_to_matrix()\n# test_train_and_evaluate()\n" }, { "alpha_fraction": 0.6057897210121155, "alphanum_fraction": 0.6100558638572693, "avg_line_length": 40.025001525878906, "blob_id": "5fe3fa8fa60dc02a288603055651bf201eb18469", "content_id": "bfc582024b46c098e86556fa034d4322693a33cb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9845, "license_type": "no_license", "max_line_length": 96, "num_lines": 240, "path": "/model/predict.py", "repo_name": "catli/student-lesson-optimizer", "src_encoding": "UTF-8", "text": "'''\n Read in test data and predict sessions from existing models\n'''\nimport 
torch\nimport torch.utils.data as Data\nfrom torch.nn import functional as F\nimport yaml\nimport os\nimport numpy as np\nfrom process_data import convert_token_to_matrix, split_train_and_test_data, extract_content_map\n# from evaluate import find_max_predictions\nfrom torch.autograd import Variable\nfrom gru import GRU_MODEL as gru_model\nimport pdb\n\n\ndef predict_sessions(model, full_data, keys, content_dim, threshold, output_filename,\n exercise_to_index_map, include_correct):\n '''\n create recommended session for each session\n '''\n data_index = torch.IntTensor(range(len(keys)))\n torch_data_index = Data.TensorDataset(data_index)\n loader = Data.DataLoader(dataset=torch_data_index,\n batch_size=1,\n num_workers=2)\n output_writer = open(output_filename, 'w')\n output_writer.write('student' + '\\t' +\n 'last_session' + '\\t' +\n 'predicted' + '\\t' +\n 'actual' + '\\t' +\n 'correct' + '\\n')\n\n for step, batch in enumerate(loader):\n # assume we are not batching the data\n # only one student value relevant\n # returns (student_id, num_sess), only first value used\n student = keys[batch[0]][0]\n # grab all the sessions for a student\n sessions = sorted(full_data[student].keys())\n # convert token data to matrix\n\n # [TODO SLO]: do we need to incorporate label_mask\n input_padded, label_padded, label_mask, seq_lens = convert_token_to_matrix(\n batch[0].numpy(), full_data, keys, content_dim)\n padded_input = Variable(torch.Tensor(\n input_padded), requires_grad=False) # .cuda()\n padded_label = Variable(torch.Tensor(\n label_padded), requires_grad=False) # .cuda()\n masked_label = torch.Tensor(label_mask)\n model.init_hidden()\n y_pred = model(padded_input, seq_lens) # .cuda()\n threshold_output, correct_ones = find_max_predictions(\n y_pred, masked_label, input_padded, content_dim)\n writer_sample_output(output_writer, student, sessions, padded_input,\n threshold_output, padded_label, correct_ones,\n exercise_to_index_map, include_correct)\n output_writer.close()\n\n\n\n\ndef find_max_predictions(output, label, input_padded, content_dim):\n '''\n compare the predicted list and the actual rate\n then generate the locaation of correct predictions\n '''\n # [TODO SLO]: \n # (1) Update_max_prediction to find top growth skills\n # find k number of skills where k is the number of\n # skills actually worked on in next sessions\n # growth is the biggest jump from previous state\n # to next state, create a running tally of the\n # perc correct for each skill\n\n # set the relative threshold output to zero\n rel_thresh_output = torch.zeros(output.shape)\n for stud, _ in enumerate(output):\n # init total correct and total answer, sum up from input_padded\n num_corrects = np.zeros(content_dim)\n num_answers = np.zeros(content_dim)\n for sess, _ in enumerate(output[stud]):\n # add the number of correct answers and total answers\n # from the previous session (in input padded)\n num_answers += input_padded[stud, sess, :content_dim]\n # num correct = perc_correct * num_answers\n num_corrects += (input_padded[stud, sess, :content_dim]*\n input_padded[stud, sess, content_dim:])\n # number of predicted activity will match actual number completed\n # assume that students will complete the same number of activities\n # in this prediction scenario\n k = torch.sum(label[stud, sess]>0) # number of content completed\n if k==0:\n continue\n else:\n # create the denominator from num answers\n denom = num_answers.copy()\n denom[denom==0] = 1\n mastery = np.divide(num_corrects, denom)\n growth_vals = 
output[stud, sess].detach().numpy() - mastery\n # pick the threshold for k-th highest growth threshold\n rel_thresh = sorted(growth_vals)[-k] # threshold of content\n # if the output greater growth threshold, set to 1\n # otherwise, all other skills set to 0\n rel_thresh_output[stud, sess] = torch.tensor((\n growth_vals >=rel_thresh).astype('float'))\n # find the difference between label and prediction\n # where prediction is incorrect (label is one and\n # threshold output 0), then the difference would be 1\n predict_diff = label - rel_thresh_output\n # set_correct_to_one = F.threshold(0.99, 0)\n incorrect_ones = F.threshold(predict_diff, 0.999, 0)\n correct_ones = label - incorrect_ones\n return rel_thresh_output, correct_ones\n\n\ndef writer_sample_output(output_writer, student, sessions, padded_input,\n threshold_output, padded_label, correct_ones,\n exercise_to_index_map, include_correct):\n '''\n Randomly sample batches, and students with each batch\n to write data\n [REFORMAT TODO] turn into class and split write student iter\n '''\n index_to_exercise_map = create_index_to_content_map(exercise_to_index_map)\n # iterate over students\n stud_input = padded_input[0]\n actual = padded_label[0]\n prediction = threshold_output[0]\n correct = correct_ones[0]\n write_student_sample(output_writer, student, sessions, stud_input,\n actual, prediction, correct,\n index_to_exercise_map, include_correct)\n\n\ndef write_student_sample(sample_writer, student, sessions, stud_input,\n actual, prediction, correct, index_to_content_map,\n include_correct):\n '''\n print readable prediciton sample\n for input, output, label expect a matrix that's already\n converted to ones where value above threshold set to 1\n '''\n content_num = len(index_to_content_map)\n for i, label in enumerate(actual):\n student_session = student + '_' + sessions[i]\n # pass over the first one, no prediction made\n if i == 0:\n continue\n if include_correct:\n readable_input = create_readable_list_with_correct(\n stud_input[i], index_to_content_map, content_num)\n else:\n readable_input = create_readable_list(\n stud_input[i], index_to_content_map)\n readable_output = create_readable_list(\n prediction[i], index_to_content_map)\n readable_label = create_readable_list(\n label, index_to_content_map)\n readable_correct = create_readable_list(\n correct[i], index_to_content_map)\n sample_writer.write(student_session + '\\t' +\n str(readable_input) + '\\t' +\n str(readable_output) + '\\t' +\n str(readable_label) + '\\t' +\n str(readable_correct) + '\\n')\n\n\ndef create_readable_list(vect, index_to_content_map):\n '''\n create the readable list of cotent\n '''\n content_list = []\n indices = np.where(vect > 0.01)[0]\n for index in indices:\n content_list.append(index_to_content_map[index+1])\n return content_list\n\n\ndef create_readable_list_with_correct(vect, index_to_content_map, content_num):\n '''\n create the readable list of cotent\n '''\n content_list = []\n indices = np.where(vect[:content_num-1] > 0.01)[0]\n for index in indices:\n content = index_to_content_map[index+1]\n perc_correct = vect[content_num + index].numpy()\n content_list.append((content, str(perc_correct)))\n return content_list\n\n\ndef create_index_to_content_map(content_index):\n '''\n Reverse the content name to index map\n '''\n index_to_content_map = {}\n for content in content_index:\n index = content_index[content]\n index_to_content_map[index] = content\n return index_to_content_map\n\n\n\ndef run_inference():\n print('start')\n loaded_params = 
yaml.load(open('input/predict_params.yaml', 'r'))\n model_filename = loaded_params['model_filename']\n nb_lstm_units = loaded_params['nb_lstm_units']\n nb_lstm_layers = loaded_params['nb_lstm_layers']\n threshold = loaded_params['threshold']\n batchsize = loaded_params['batchsize']\n include_correct = loaded_params['include_correct']\n exercise_filename = os.path.expanduser(\n loaded_params['exercise_filename'])\n output_filename = os.path.expanduser(\n loaded_params['output_filename'])\n content_index_filename = loaded_params['content_index_filename']\n # creat ethe filename\n file_affix = model_filename\n print(file_affix)\n exercise_to_index_map, content_dim = extract_content_map(\n content_index_filename)\n keys, _, full_data, = split_train_and_test_data(exercise_filename,\n content_index_filename, test_perc=0)\n # run the gru model\n input_dim = content_dim*2\n\n model = gru_model(input_dim=input_dim,\n output_dim=content_dim,\n nb_lstm_layers=nb_lstm_layers,\n nb_lstm_units=nb_lstm_units,\n batch_size=batchsize)\n model.load_state_dict(torch.load( model_filename ))\n predict_sessions(model, full_data, keys, content_dim, threshold,\n output_filename, exercise_to_index_map, include_correct)\n\n\nif __name__ == '__main__':\n run_inference()" }, { "alpha_fraction": 0.6262599229812622, "alphanum_fraction": 0.6314323544502258, "avg_line_length": 38.25520706176758, "blob_id": "ac395321c17f042fc80ca705d0b96380e2648c7d", "content_id": "3fe857f2e905a13d3a317be9941bc355a9e67c8a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7540, "license_type": "no_license", "max_line_length": 88, "num_lines": 192, "path": "/model/process_data.py", "repo_name": "catli/student-lesson-optimizer", "src_encoding": "UTF-8", "text": "import numpy as np\nimport json\nfrom sklearn.model_selection import train_test_split\nimport torch.nn.utils as utils\nimport pdb\n\n\ndef convert_token_to_matrix(batch_index, json_data, json_keys, content_num):\n '''\n convert the token to a multi-hot vector\n from student session activity json_data\n convert this data in batch form\n '''\n # [TODO SLO]:\n # [Optional] change input so that each\n # skill worked on is fed in sequential order\n\n # number of students in the batch\n num_sess = []\n # max number of sessions in batch\n for student_index in batch_index:\n # return the key pairs (student_id, seq_len)\n # and the first item of pair as student id\n student_key = json_keys[student_index][0]\n num_sess.append(len(json_data[student_key].keys())-1)\n max_seq = np.max(num_sess) + 1\n seq_lens = num_sess\n\n input_padded, label_padded, label_mask = create_padded_matrix_with_correct(\n batch_index, json_data, json_keys, content_num, max_seq)\n\n # assign the number of sessions as sequence length for each student\n # this will feed be used later to tell the model\n # which sessions are padded\n return input_padded, label_padded, label_mask, seq_lens\n\n\n\n\ndef create_padded_matrix_with_correct(batch_index, json_data, json_keys,\n content_num, max_seq):\n '''\n input:\n json_data: {student_key: session_id: [(skill_id, is_correct}]}\n batch_index = the student id in the batch\n output:\n but input and label will translate row for each session\n and column for each skill, populating with perc correct\n for each data. 
A problem not worked on is set to 0\n [[ 0 0.2 0 0.85 ]]\n steps:\n create an empty matrix for the padded input /output\n populated with the count/binomial state, concatenated\n with the percent correct\n output vectors populated with the binomial state\n '''\n batchsize = len(batch_index)\n # placeholder for padded input and label\n input_padded = np.zeros((batchsize, int(max_seq), content_num), int)\n correct_padded = np.zeros((batchsize, int(max_seq), content_num), int)\n label_mask_padded = np.zeros((batchsize, int(max_seq), content_num), int)\n # populate student_padded\n for stud_num, student_index in enumerate(batch_index):\n # return the key pairs (student_id, seq_len)\n # and the first item of pair as student id\n student_key = json_keys[student_index][0]\n sessions = sorted(json_data[student_key].keys())\n for sess_num, session in enumerate(sessions):\n # sessions data, with tuples of student activity\n # content_items = (exercise_id , is_correct)\n content_items = json_data[student_key][session]\n for item_num, item in enumerate(content_items):\n exercise_id = item[0]\n is_correct = item[1]\n label_mask_padded[stud_num, sess_num, exercise_id-1] = 1\n input_padded[stud_num, sess_num, exercise_id-1]+= 1\n correct_padded[stud_num, sess_num, exercise_id-1]+= is_correct\n concat_input_padded, perc_correct_padded = concat_perc_correct(\n correct_padded, input_padded)\n # take first n-1 sessions for input and last n-1 sessions for output\n concat_input_padded = concat_input_padded[:, :-1]\n # generate the labels and mask by averaging over multiple sessions\n # default set to no averaging (num_next = 1)\n label_padded, label_mask = max_next_sessions(perc_correct_padded, label_mask_padded,\n num_next = 1)\n return concat_input_padded, label_padded, label_mask\n\n\ndef concat_perc_correct(correct_padded, input_padded):\n '''\n calculate the perc correct for activtiies worked on\n and then concatenate with input matrix\n '''\n # create denominator\n correct_denom = input_padded.copy()\n # set 0 to 1 for divisbility\n correct_denom[correct_denom == 0] = 1\n # divide correct by denom\n perc_correct_padded = correct_padded/correct_denom\n # concatenate the input and ocrrect\n concat_input_padded = np.concatenate((input_padded, perc_correct_padded),\n axis=2)\n return concat_input_padded, perc_correct_padded\n\n\n\ndef max_next_sessions(perc_correct_padded, label_padded, num_next):\n '''\n For the next x sessions, create a new\n output that returns the max of _num_next_ sessions.\n Only works if dim(perc_correct_padded) = dim(label_padded)\n '''\n perc_correct_padded = perc_correct_padded[:, 1:]\n label_padded = label_padded[:, 1:]\n next_label_mask = label_padded.copy()\n next_correct_padded = perc_correct_padded.copy()\n for b, _ in enumerate(label_padded):\n for i, _ in enumerate(label_padded[b]):\n next_label_mask[b, i, :] = np.max(\n label_padded[b, i:(i+num_next), :], axis=0)\n next_correct_padded[b, i, :] = np.max(\n perc_correct_padded[b, i:(i+num_next), :], axis=0)\n return next_correct_padded, next_label_mask\n\n\ndef extract_content_map(content_index_filename):\n '''\n\n '''\n index_reader = open(content_index_filename, 'r')\n exercise_to_index_map = json.load(index_reader)\n content_num = len(exercise_to_index_map.keys())\n return exercise_to_index_map, content_num\n\n\ndef split_train_and_test_data(exercise_filename, content_index_filename,\n test_perc = 0 ):\n '''\n split the data into training and test by learners\n input: exercise file with json data\n {'anon_student_id': 
session_1: [(skill_key, %_correct),\n (skill_key, %_correct)], session_2:\n [(skill_key, %_correct), (skill_key, %_correct)]}\n '''\n exercise_reader = open(exercise_filename, 'r')\n full_data = json.load(exercise_reader)\n train_data = {}\n val_data = {}\n ordered_train_keys, ordered_val_keys = split_train_and_test_ids(\n json_data=full_data,\n test_perc=test_perc)\n # to expose the json file\n index_reader = open(content_index_filename, 'r')\n exercise_to_index_map = json.load(index_reader)\n return ordered_train_keys, ordered_val_keys, full_data \n\n\ndef split_train_and_test_ids(json_data, test_perc):\n '''\n split anon ids into test_perc % in test dataset\n and 1-test_perc % in training dataset\n '''\n student_ids = [student for student in json_data]\n train_ids, val_ids = train_test_split(student_ids,\n test_size=test_perc)\n ordered_train_keys = create_ordered_sequence_list(train_ids, json_data)\n ordered_val_keys = create_ordered_sequence_list(val_ids, json_data)\n return ordered_train_keys, ordered_val_keys\n\n\ndef create_ordered_sequence_list(set_ids, exercise_json):\n '''\n create ordered sequence length\n will be used for batching to efficiently\n run through the sequence ids\n '''\n key_seq_pair = create_key_seqlen_pair(set_ids, exercise_json)\n key_seq_pair.sort(key=lambda x: x[1], reverse=True)\n return key_seq_pair\n\n\ndef create_key_seqlen_pair(set_ids, json_data):\n '''\n create a tuple with learner id and the sequence length,\n i.e. number of sessions per learner\n ('$fd@w', 4)\n '''\n key_seq_pair = []\n for id in set_ids:\n seq_len = len(json_data[id])\n key_seq_pair.append((id, seq_len))\n return key_seq_pair\n\n\n\n" } ]
4
vanessamendoza/flipitapp
https://github.com/vanessamendoza/flipitapp
1a31b6e8438025b0d0ea2ed9ce52d3e61ad6a7dd
41db97ab07c6ed11568dc76b2c6091ecf71b1085
6d97ac3ba0dd2d397c7fa19c842b568d9d5db7e2
refs/heads/master
2020-06-23T15:25:09.085770
2019-07-24T15:45:28
2019-07-24T15:45:28
198,662,923
0
1
null
null
null
null
null
[ { "alpha_fraction": 0.593406617641449, "alphanum_fraction": 0.6153846383094788, "avg_line_length": 17.399999618530273, "blob_id": "cb598cc783573900752ef6956671e85e9b22c75b", "content_id": "e95b4f75ff24dfb49d9414a8a50faf22530a6375", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 91, "license_type": "no_license", "max_line_length": 25, "num_lines": 5, "path": "/app/models/model.py", "repo_name": "vanessamendoza/flipitapp", "src_encoding": "UTF-8", "text": "def reverseit(word):\n print(word[::-1])\n return word[::-1]\n\nprint(reverseit(\"hello\"))" }, { "alpha_fraction": 0.682812511920929, "alphanum_fraction": 0.682812511920929, "avg_line_length": 29.5238094329834, "blob_id": "16ce021b4efc73effc3bdd78ae85017870ee1079", "content_id": "a4188587005bb1a741ba0d7e2f9320f215eafb4c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 640, "license_type": "no_license", "max_line_length": 61, "num_lines": 21, "path": "/app/routes.py", "repo_name": "vanessamendoza/flipitapp", "src_encoding": "UTF-8", "text": "from app import app\nfrom flask import render_template, request\nfrom app.models import model, formopener\n\[email protected]('/')\[email protected]('/index')\ndef index():\n return render_template('index.html')\n\[email protected]('/results', methods = ['GET', 'POST'])\ndef results():\n userdata = dict(request.form)\n print(userdata) # dict of the form fields submitted by the user\n word = userdata['word']\n print(word) # the word entered in the form\n a = model.reverseit(word) ## go to the model file and\n ## call reverseit, passing in the word we stored,\n ## then keep the reversed string it returns\n print(a)\n return render_template('results.html', reversed = a)" } ]
2
geekjimbo/textloader
https://github.com/geekjimbo/textloader
4a98102d2527eb083eee86dbf13bc8238408577e
7d6a028635bd12cdc01643ecdf9835010cda744b
136df10ed1c105bcdca7e824f306b186540dc514
refs/heads/master
2020-05-26T04:57:14.107394
2019-05-22T21:04:54
2019-05-22T21:04:54
188,113,700
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6399999856948853, "alphanum_fraction": 0.6618182063102722, "avg_line_length": 16.1875, "blob_id": "61e4f32a9526372707756c61de279bffd0f4bf12", "content_id": "3fa5441cb4e87367ed49a79dc936ef0f2d7161fe", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 275, "license_type": "no_license", "max_line_length": 50, "num_lines": 16, "path": "/src/clean.py", "repo_name": "geekjimbo/textloader", "src_encoding": "UTF-8", "text": "# load data sets\nimport pandas as pd\nimport yaml\n\n_config = yaml.safe_load(open('./config/config.yml'))\n\ndf1 = pd.read_fwf(_config['file_path_1'])\ndf2 = pd.read_fwf(_config['file_path_2'])\n\nframes = [df1, df2]\n\ndf = pd.concat(frames)\n\n\n# integrate data sets \n# clean data set\n" } ]
1
MaxBranvall/DWG_Bot
https://github.com/MaxBranvall/DWG_Bot
d582043fba20a8cb08dcbe3a7d349d7574f621c9
f3bd68a3fa1cb1a44f1a393c59a5099fd786ec9d
e2cc6a9bd9038439f39ba09f9556c2398fcc692b
refs/heads/master
2020-03-31T16:10:15.252809
2018-11-06T06:25:28
2018-11-06T06:25:28
152,365,174
4
0
null
null
null
null
null
[ { "alpha_fraction": 0.49597522616386414, "alphanum_fraction": 0.5120742917060852, "avg_line_length": 23.86153793334961, "blob_id": "3320fd57300883c209fbb5bc3018fc1d7674a904", "content_id": "a19910a468d4c3675d03d9d4f31306b4cdcd8f13", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1615, "license_type": "no_license", "max_line_length": 70, "num_lines": 65, "path": "/csvToMdTable.py", "repo_name": "MaxBranvall/DWG_Bot", "src_encoding": "UTF-8", "text": "import csv\nimport scraper\nfrom datetime import datetime\n\ndef main(mdFile=None):\n\n readXboxOne = csv.reader(open(scraper.finalXboxOneTablePath, 'r'))\n readXbox360 = csv.reader(open(scraper.finalXbox360TablePath, 'r'))\n now = datetime.now()\n\n # Create a markDown file if none is specified\n if mdFile == 'createNew':\n\n mdFileName = (f'{now.hour - 12}:{now.minute}.{now.second}.md')\n mdFilePath = (f'csvAndMarkDown/markDownFiles/testFinish.md')\n with open(mdFilePath, 'w'):\n pass\n\n else:\n mdFileName = ('markdownTable.md')\n mdFilePath = (f'csvAndMarkDown/markDownFiles/{mdFileName}')\n with open(mdFilePath, 'w'):\n pass\n\n horizontal = '---|------|---\\n'\n\n with open(mdFilePath, 'w') as mdTable:\n\n line = 0\n\n for row in readXboxOne:\n\n try:\n mdTable.write(f'{row[0]} | {row[1]} | {row[2]}\\n')\n\n if line == 1:\n mdTable.write(horizontal)\n\n # This will be run for the title of the table\n except IndexError:\n mdTable.write(f'\\n{row[0]}\\n\\n')\n\n line += 1\n\n line = 0\n\n for row in readXbox360:\n\n if line == 0:\n mdTable.write('-')\n\n try:\n mdTable.write(f'{row[0]} | {row[1]} | {row[2]}\\n')\n\n if line == 1:\n mdTable.write(horizontal)\n\n # Run for the title of the table\n except IndexError:\n mdTable.write(f'\\n{row[0]}\\n\\n')\n\n line += 1\n\nif __name__ == '__main__':\n main()" }, { "alpha_fraction": 0.6302118897438049, "alphanum_fraction": 0.679425835609436, "avg_line_length": 43.33333206176758, "blob_id": "90cfce7911b425f0d6d83a2e79eeb066da7640d6", "content_id": "fae81d148d6d3420163ee5bf8aea5f69275ed632", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1487, "license_type": "no_license", "max_line_length": 79, "num_lines": 33, "path": "/testFiles/csvAndMarkDown/markDownFiles/untitledTable.md", "repo_name": "MaxBranvall/DWG_Bot", "src_encoding": "UTF-8", "text": "\nContent Title | Content Type | Discount\n---|------|---\nShadow of the Tomb Raider* | Xbox One X Enhanced | 25%\nBlack Mirror* | Xbox One Game | 80%\nUnravel Yarny Bundle* | Xbox One X Enhanced | 35%\nNBA LIVE 19* | Xbox One X Enhanced | 33%\nRobocraft Infinity* | Xbox One X Enhanced | 60%\nTroll & I* | Xbox One Game | 70%\nThe Sun and Moon* | Xbox One Game | 70%\nQuiplash* | Xbox One Game | 45%\nLetter Quest: Grimm’s Journey Remastered* | Xbox One Game | 70%\nVertical Drop Heroes HD* | Xbox One Game | 70%\nShred It! 
* | Xbox One Game | 40%\nRad Rodgers* | Xbox One Game | 30%\nRogue Stormers* | Xbox One Game | 85%\nLichdom: Battlemage* | Xbox One Game | 70%\nUnravel Two* | Xbox One X Enhanced | 25%\nMadden NFL 19: Hall of Fame Edition* | Xbox One X Enhanced | 40%\nShadow of the Tomb Raider – Croft Edition* | Xbox One X Enhanced | 25%\nShadow of the Tomb Raider – Digital Deluxe Edition* | Xbox One X Enhanced | 25%\nMadden NFL 19* | Xbox One X Enhanced | 35%\nNinjin: Clash of Carrots* | Xbox One Game | 25%\nChild of Light* | Xbox One Game | 70%\nChild of Light – The Golem’s Plight Pack | Add-On | 50%\nChild of Light – Light Pack | Add-On | 50%\nChild of Light – Dark Pack | Add-On | 50%\nTropico 5 – Epic Meltdown | Add-On | 75%\nTropico 5 – Espionage | Add-On | 75%\nTropico 5 – Paradise Lost | Add-On | 75%\nTropico 5 – Waterborne | Add-On | 75%\nThe Shapeshifting Detective (pre-order) | Xbox One Game | 20%\nThe Bridge | Xbox One Game | 70%\nIron Wings | Xbox One Game | 50%" }, { "alpha_fraction": 0.7460317611694336, "alphanum_fraction": 0.7492063641548157, "avg_line_length": 23.30769157409668, "blob_id": "3bf1610f944a725e9f12ac561f16e35024024b9e", "content_id": "84a04317aae57ce294c08925bfea9bdd5d1973e8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 315, "license_type": "no_license", "max_line_length": 59, "num_lines": 13, "path": "/testFiles/testing.py", "repo_name": "MaxBranvall/DWG_Bot", "src_encoding": "UTF-8", "text": "from lxml import html\nfrom bs4 import BeautifulSoup\n\nmdTablePath = 'csvAndMarkDown/markDownFiles/testFinish.md'\nendOfPostPath = 'csvAndMarkDown/markDownFiles/endOfPost.md'\n\nwith open(mdTablePath, 'r') as x:\n mainPost = x.read()\n\nwith open(endOfPostPath, 'r') as x:\n ending = x.read()\n\nprint(mainPost + ending)" }, { "alpha_fraction": 0.7743701934814453, "alphanum_fraction": 0.7907995581626892, "avg_line_length": 64.21428680419922, "blob_id": "240fd8ffe5f21f81b4232d13838bbde5db4ec64a", "content_id": "56abb890ce45b244061af0ac06e10386d54d98c2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 913, "license_type": "no_license", "max_line_length": 289, "num_lines": 14, "path": "/README.md", "repo_name": "MaxBranvall/DWG_Bot", "src_encoding": "UTF-8", "text": "# Deals with Gold Bot\nThis is a bot that automatically converts MajorNelson's xbox sales into markdown tables. It also replaces the discount percentage with the discount price that links to the microsoft page for that game/add-on. These tables are then posted to /r/XboxOne for the convenience of the community.\n\nFuture versions will include info from sites such as TrueAchievements, MetaCritic, and HowLongToBeat integrated into the tables for each game.\n\n# Known Bugs\n* Games/add-ons with no href display the discount percentage and do not link to anything.\n\n# Patched\n* Bug01- Xbox360 prices were assigned to wrong games because the bot skipped a game when parsing the major nelson table.\n\n* Bug02- When games are included in game pass the price shows as 'included'.\n\n* Bug03- When an xbox 360 game has the same title as an xbox one game the 360 game will be skipped. 
Games with no href are not added to list.\n" }, { "alpha_fraction": 0.4905063211917877, "alphanum_fraction": 0.5213607549667358, "avg_line_length": 24.795917510986328, "blob_id": "c3649aa126a389ab16974e0c27cd4f0c88587cde", "content_id": "cf4f9fec2e69e0c41012396aeffaae910d9e1773", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1264, "license_type": "no_license", "max_line_length": 141, "num_lines": 49, "path": "/testFiles/testGameSplitTA.py", "repo_name": "MaxBranvall/DWG_Bot", "src_encoding": "UTF-8", "text": "import csv\nimport requests\nfrom lxml import html\n\n\nheader = { \n 'USER-AGENT': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36',\n}\ncsvFile = 'csvTable.csv'\ntrueAchievementsURL = 'https://www.trueachievements.com/game/'\n\ncsvRead = csv.reader(open(csvFile, 'r'))\n\nfor row in csvRead:\n\n try:\n if row[0] == 'Content Title':\n pass\n \n elif row[0] == 'Xbox One Table':\n pass\n\n else:\n gameSplit = row[0].split()\n print(gameSplit)\n gameJoin = '-'.join(gameSplit)\n print(gameJoin)\n\n if '*' in gameJoin:\n print('yes')\n gameJoin = gameJoin.strip('*')\n # print(gameJoin)\n\n taURL = trueAchievementsURL + gameJoin\n print(taURL)\n\n TAreq = requests.get(taURL, headers= header)\n x = html.fromstring(TAreq.content)\n\n y = x.xpath('//*[@id=\"sidebar\"]/div[2]/div[2]/div[1]/div/div[1]/text()')\n print(y[0])\n\n foo = x.xpath('//*[@id=\"sidebar\"]/div[2]/div[2]/div[1]/div/div[3]/text()')\n print(foo[0])\n\n break\n except IndexError:\n print()\n pass\n" }, { "alpha_fraction": 0.657706081867218, "alphanum_fraction": 0.6804062128067017, "avg_line_length": 25.171875, "blob_id": "f9e0404bf0a8f358ca6f49b493815a03f2664935", "content_id": "722bd1d490586ee4d9f4f0b4d336fe29ba243a0e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1674, "license_type": "no_license", "max_line_length": 99, "num_lines": 64, "path": "/testFiles/tableTest.py", "repo_name": "MaxBranvall/DWG_Bot", "src_encoding": "UTF-8", "text": "import csv\nimport os\n\ncsvFile = 'csvAndMarkDown/csvFiles/xboxOneTable.csv'\nx360File = 'csvAndMarkDown/csvFiles/xbox360Table.csv'\n\n# csvPath = os.path.dirname(csvFile)\n# print(csvPath)\n\nopenCsv = csv.reader(open(csvFile, 'r'))\nopenx360 = csv.reader(open(x360File, 'r'))\n\nnewList = []\n\ni = 0\n\nfor row in openCsv:\n newList.append(row)\n \n# print(newList)\n\nnewList1 = []\n\ni = 0\n\nfor row in openx360:\n newList1.append(row)\n\n\n# newList1[2:] = sorted(newList1[2:])\n\n# newList[2:] = sorted(newList[2:])\n\n# testCsv = csv.writer(open('testFile.csv', 'w'))\n\n# for item in newList:\n# testCsv.writerow(item)\n\n# for item in newList1:\n# testCsv.writerow(item)\n\n\n# testList = ['shadow of the tomb raider', 'black mirror', 'assassins creed', 'rocket league']\n\n# shadowList = ['Shadow of the Tomb Raider*', 'Xbox One X Enhanced', '25%']\n# blackList = ['Black Mirror*', 'Xbox One Game', '80%']\n# unravelList = ['Unravel Yarny Bundle*', 'Xbox One X Enhanced', '35%']\n# nbaList = ['NBA LIVE 19*', 'Xbox One X Enhanced', '33%']\n# robocraftList = ['Robocraft Infinity*', 'Xbox One X Enhanced', '60%']\n\n# newList = [shadowList, blackList, unravelList, nbaList, robocraftList]\n\n# newList.sort()\n\n# print(newList)\n\n# FILE = open(csvFile, 'r') # %IN_PATH% would be the path to the file you want to open\n# lines = FILE.readlines() #takes the lines from the file and puts each line as its own 
list object\n# FILE.close() #closes the file to prevent corruption\n# ordered_lines = sorted(lines) #sorts the lines alphanumerically\n# FILE = open(\"testTest.csv\", 'w') #opens file to output sorted version to\n# for i in range(len(ordered_lines)):\n# FILE.write(ordered_lines[i])\n# FILE.close()" }, { "alpha_fraction": 0.6739864945411682, "alphanum_fraction": 0.6739864945411682, "avg_line_length": 24.782608032226562, "blob_id": "00fe11bd688464d5ae6333c471f2900407ee9c70", "content_id": "dcbc3963a14b95e04f7e3ff079d080cd0281b4fd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 592, "license_type": "no_license", "max_line_length": 125, "num_lines": 23, "path": "/DWG_BOT.py", "repo_name": "MaxBranvall/DWG_Bot", "src_encoding": "UTF-8", "text": "import requests\nimport praw\n\ndef main():\n\n mdTablePath = 'csvAndMarkDown/markDownFiles/markdownTable.md'\n endOfPostPath = 'csvAndMarkDown/markDownFiles/endOfPost.md'\n\n with open(mdTablePath, 'r') as x:\n mainPost = x.read()\n\n with open(endOfPostPath, 'r') as x:\n ending = x.read()\n\n redditInstance = praw.Reddit('dwgBot')\n\n testSub = redditInstance.subreddit('XboxOne')\n\n testSub.submit('This Weeks Deals with Gold and Spotlight Sale! Formatted for Easy Reading!', selftext= mainPost + ending)\n print('\\nSubmitted!')\n\nif __name__ == '__main__':\n main()" }, { "alpha_fraction": 0.7190553545951843, "alphanum_fraction": 0.7353420257568359, "avg_line_length": 33.13888931274414, "blob_id": "f34ef96ec08e23b62d596f6e4042ca41d557b88a", "content_id": "b357fd94d1f991196902c86a56c032231ce0b5dd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1228, "license_type": "no_license", "max_line_length": 264, "num_lines": 36, "path": "/csvAndMarkDown/markDownFiles/endOfPost.md", "repo_name": "MaxBranvall/DWG_Bot", "src_encoding": "UTF-8", "text": "-\n---\n\nAn asterisk (*) specifies that the offer is only valid for Xbox Live Gold members.\n\nA plus sign (+) specifies that the title offers in-app purchases.\n\n---\n\n[Link to Major Nelson's website](https://majornelson.com/2018/11/05/this-weeks-deals-with-gold-and-spotlight-sale-138/)\n\n---\n-\n^(I'm a bot serving the r/XboxOne community each week!)\n\n^(I'm still in my very beginning stages right now. In the future I will be providing you with lots of useful info for each game such as MetaCritic scores, HowLongToBeat times, and info from TrueAchievements will all be integrated into each table for easy viewing.)\n\n^(This will be your one stop shop for Deals with Gold, Spotlight, and Publisher sales each week!)\n\nDisclaimer: Being a new bot I may contain a bug or two, please [notify me](https://www.reddit.com/message/compose/?to=stratcat22) if you happen to catch any. 
Thanks so much!\n\n---\nHave any questions, comments, concerns, suggestions, jokes, memes?\n-\n[Shoot my human a message!](https://www.reddit.com/message/compose/?to=stratcat22)\n-\n---\n\nBig shoutout to /u/mal68 for suggesting the creation of this bot!\n-\n---\n\n^^Coded ^^with ^^love ^^by ^^u/stratcat22 ^^<3\n\n[Source Code](https://github.com/MaxBranvall/DWG_Bot)\n---" }, { "alpha_fraction": 0.5507199168205261, "alphanum_fraction": 0.5712532997131348, "avg_line_length": 29.559999465942383, "blob_id": "bdee5c1da1ec93de1107671f89e0a7440921bca4", "content_id": "bc512f912f20e27dac2baa5c0957cde63d36319d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 12224, "license_type": "no_license", "max_line_length": 147, "num_lines": 400, "path": "/scraper.py", "repo_name": "MaxBranvall/DWG_Bot", "src_encoding": "UTF-8", "text": "import requests, csv\nimport csvHandler, DWG_BOT\nfrom collections import OrderedDict\nfrom time import time\nfrom bs4 import BeautifulSoup\n\nstartTime = time()\n\ndate = '2018/11/05'\nsaleNumber = '138'\n\nxboxOneDictionary = {}\nxbox360Dictionary = {}\n\nheaderList = []\ngameDataList = []\nxboxOnePriceList = []\nxbox360PriceList = []\nremoveFromPrice = ['with', 'Xbox', 'Live', 'Gold']\n\nxboxOneTablePath = 'csvAndMarkDown/csvFiles/xboxOneTable.csv'\nxbox360TablePath = 'csvAndMarkDown/csvFiles/xbox360Table.csv'\nfinalXboxOneTablePath = 'csvAndMarkDown/csvFiles/finalXboxOneTable.csv'\nfinalXbox360TablePath = 'csvAndMarkDown/csvFiles/finalXbox360Table.csv'\n\nheader = {'USER-AGENT': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36'}\ntestHeader = {'USER-AGENT': 'TestBot'}\n\nmajNelsonURL = (f'https://majornelson.com/{date}/this-weeks-deals-with-gold-and-spotlight-sale-{saleNumber}/')\ntrueAchievementsURL = 'https://www.trueachievements.com/game/'\ntestUrl = 'html/week3.html'\n\n# Debugging\nbreakForDebug = 500\ndebugMode = False\n\n\nclass Utility:\n\n def clearFile(filePath):\n with open(filePath, 'w'):\n pass\n\n def requestWebPage(mode=None, href=None):\n\n if mode == 'getPrice':\n getStorePage = requests.get(href, headers= header)\n storePageSoup = BeautifulSoup(getStorePage.text, 'html5lib')\n return storePageSoup\n\n else:\n if debugMode == True:\n x = open(testUrl, 'r')\n nelsonSoup = BeautifulSoup(x, 'html5lib')\n return nelsonSoup\n\n else:\n x = requests.get(majNelsonURL, headers={'USER-AGENT': 'Mozilla 5.0'})\n print(f'Status Code: {x}')\n nelsonSoup = BeautifulSoup(x.text, 'html5lib')\n return nelsonSoup\n\n def getGamePrice():\n\n nelsonSoup = Utility.requestWebPage()\n\n Utility.processAnchorTags(nelsonSoup)\n xboxOneDict, xbox360Dict = Utility.sortDictionaries()\n\n Utility.getXboxOnePrices(xboxOneDict)\n Utility.getXbox360Prices(xbox360Dict)\n\n openXboxOne, writeToXboxOne, readFromXboxOne = Utility.xboxOneFiles()\n openXbox360, writeToXbox360, readFromXbox360 = Utility.xbox360Files()\n\n Utility.addPricesToXboxOneTable(readFromXboxOne, writeToXboxOne)\n Utility.addPricesToXbox360Table(readFromXbox360, writeToXbox360)\n\n openXboxOne.close()\n openXbox360.close()\n\n def processAnchorTags(nelsonSoup):\n\n for anchorTag in nelsonSoup.find_all(['tr', 'td', 'a'], {'rel': 'noopener'}):\n\n # First few a tags have no text, this skips those\n if anchorTag.text == '':\n pass\n\n # if the text has been added to the dict, pass, if not, add the game name with its href\n else:\n\n try:\n if 'microsoft' in anchorTag['href']:\n\n if 
anchorTag.text not in xboxOneDictionary.keys():\n\n try:\n xboxOneDictionary[anchorTag.text] = anchorTag['href']\n\n except KeyError:\n pass\n\n elif 'microsoft' not in anchorTag['href']:\n\n if anchorTag.text not in xbox360Dictionary.keys():\n\n try:\n xbox360Dictionary[anchorTag.text] = anchorTag['href']\n\n except KeyError:\n pass\n\n else:\n pass\n\n # If there is no href for the entry, set it's price to null\n except KeyError:\n xboxOneDictionary[anchorTag.text] = 'null'\n\n def sortDictionaries():\n\n sortedXboxOneDict = OrderedDict(sorted(xboxOneDictionary.items()))\n sortedXbox360Dict = OrderedDict(sorted(xbox360Dictionary.items()))\n\n print('Xbox One')\n print(len(sortedXboxOneDict.keys()))\n print('Xbox 360')\n print(len(sortedXbox360Dict.keys()))\n\n return sortedXboxOneDict, sortedXbox360Dict\n\n def getXboxOnePrices(xboxOneDict):\n\n debugLoopBreak = 0\n priceIterationNumber = 0\n\n for game, href in xboxOneDict.items():\n\n if debugLoopBreak == breakForDebug:\n break\n\n if href == 'null':\n xboxOnePriceList.append('null')\n print(f'(X1) Retrieved price: {priceIterationNumber}!')\n\n else:\n\n storePageSoup = Utility.requestWebPage(mode='getPrice', href=href)\n\n try:\n discountedPrice = storePageSoup.find('div', {'class': 'remediation-cta-label'})\n discountedPrice = discountedPrice.text.split()\n\n except AttributeError:\n\n try:\n discountedPrice = storePageSoup.find('span', {'class': 'price-disclaimer'})\n discountedPrice = discountedPrice.find('span').text.split()\n\n except AttributeError:\n discountedPrice = storePageSoup.find('div', {'class': 'pi-price-text'})\n discountedPrice = discountedPrice.find('span').text.split()\n\n for keyword in removeFromPrice:\n if keyword in discountedPrice:\n discountedPrice.remove(keyword)\n\n if discountedPrice[0] == 'Included':\n discountedPrice = storePageSoup.find_all('span', {'class': 'price-disclaimer'})\n discountedPrice = [discountedPrice[0].text]\n\n elif discountedPrice[0] == 'Free':\n try:\n discountedPrice = storePageSoup.find('div', {'class': 'pi-price-text'})\n discountedPrice = discountedPrice.find('span').text.split()\n\n if discountedPrice == []:\n raise AttributeError\n\n except AttributeError:\n discountedPrice = storePageSoup.find('span', {'class': 'price-disclaimer'})\n discountedPrice = discountedPrice.find('span').text.split()\n\n xboxOnePriceList.append(f'[{discountedPrice[0]}]({href})')\n print(f'(X1) Retrieved price: {priceIterationNumber}!')\n\n priceIterationNumber += 1\n debugLoopBreak += 1\n\n def getXbox360Prices(xbox360Dict):\n\n debugLoopBreak = 0\n priceIterationNumber = 0\n\n for game, href in xbox360Dict.items():\n\n if debugLoopBreak == breakForDebug:\n break\n\n storePageSoup = Utility.requestWebPage(mode='getPrice', href=href)\n\n try:\n discountedPrice = storePageSoup.find('span', {'class': 'GoldPrice ProductPrice'})\n discountedPrice = discountedPrice.text\n\n except AttributeError:\n discountedPrice = storePageSoup.find('span', {'class': 'SilverPrice ProductPrice'})\n discountedPrice = discountedPrice.text\n\n xbox360PriceList.append(f'[{discountedPrice}]({href})')\n print(f'(X360) Retrieved price: {priceIterationNumber}!')\n\n priceIterationNumber += 1\n debugLoopBreak += 1\n\n def xboxOneFiles():\n\n openXboxOne = open(finalXboxOneTablePath, 'w')\n writeToXboxOne = csv.writer(openXboxOne)\n readFromXboxOne = csv.reader(open(xboxOneTablePath, 'r'))\n\n return openXboxOne, writeToXboxOne, readFromXboxOne\n\n def xbox360Files():\n\n openXbox360 = open(finalXbox360TablePath, 'w')\n 
writeToXbox360 = csv.writer(openXbox360)\n readFromXbox360 = csv.reader(open(xbox360TablePath, 'r'))\n\n return openXbox360, writeToXbox360, readFromXbox360\n\n def addPricesToXboxOneTable(readFromXboxOne, writeToXboxOne):\n\n debugLoopBreak = 0\n lineNumber = 0\n priceIndexNumber = 0\n\n for line in readFromXboxOne:\n\n if debugLoopBreak == breakForDebug:\n break\n\n if lineNumber == 0 or lineNumber == 1: # Skip the first two lines\n pass\n\n else:\n # For each price in the priceList, assign price to last index of each line\n if xboxOnePriceList[priceIndexNumber] == 'null':\n pass\n\n else:\n line[-1] = xboxOnePriceList[priceIndexNumber]\n\n priceIndexNumber += 1\n\n lineNumber += 1\n debugLoopBreak += 1\n\n writeToXboxOne.writerow(line)\n\n def addPricesToXbox360Table(readFromXbox360, writeToXbox360):\n\n debugLoopBreak = 0\n lineNumber = 0\n priceIndexNumber = 0\n\n for line in readFromXbox360:\n\n if debugLoopBreak == breakForDebug:\n break\n\n if lineNumber == 0 or lineNumber == 1:\n pass\n\n else:\n # For each price in the priceList, assign price to last index of each line\n try:\n line[-1] = xbox360PriceList[priceIndexNumber]\n\n except IndexError:\n break\n\n priceIndexNumber += 1\n lineNumber += 1\n debugLoopBreak += 1\n\n writeToXbox360.writerow(line)\n\n\nclass MajorNelsonScrape:\n\n def __init__(self):\n\n # Clear files\n Utility.clearFile(xboxOneTablePath)\n Utility.clearFile(xbox360TablePath)\n\n # Send a request.get to major nelson post\n self.nelsonSoup = Utility.requestWebPage()\n\n self.writeToXboxOneTable = csv.writer(open(xboxOneTablePath, 'a'))\n self.writeToXbox360Table = csv.writer(open(xbox360TablePath, 'a'))\n\n self.currentTable = 'Xbox-One'\n\n self.getTableHeaders()\n\n def getTableHeaders(self):\n\n headerNumber = 0\n\n for row in self.nelsonSoup.find_all('tr'):\n\n headerList.clear()\n tableHeaders = row.find_all('th')\n\n for header in tableHeaders:\n\n # Stops 'Notes' header from being added to the table\n if headerNumber == 3:\n break\n\n else:\n if header.text == 'Discount':\n headerList.append('Price (USD)')\n else:\n headerList.append(header.text)\n\n headerNumber += 1\n break\n\n self.getTableContents()\n\n def getTableContents(self):\n\n # Initialize xbox one table\n self.writeToXboxOneTable.writerow(['Xbox One Table'])\n self.writeToXboxOneTable.writerow(headerList)\n\n for row in self.nelsonSoup.find_all('tr')[1:]:\n\n # Clear list on each iteration to prevent dupe writing\n gameDataList.clear()\n\n gameData = row.find_all('td')\n\n # Detects when XboxOne table ends\n if gameData == []:\n\n self.currentTable = 'Xbox-360'\n\n self.writeToXboxOneTable.writerow([])\n\n # Initialize xbox 360 table\n self.writeToXbox360Table.writerow(['Xbox 360 Table'])\n self.writeToXbox360Table.writerow(headerList)\n\n else:\n\n itemNumber = 0\n for item in gameData:\n\n # Breaks before writing 'Notes' column to list\n if itemNumber == 3:\n break\n\n else:\n gameDataList.append(item.text)\n itemNumber += 1\n\n self.writeToTable(gameDataList)\n\n def writeToTable(self, gameData):\n\n if self.currentTable == 'Xbox-One':\n self.writeToXboxOneTable.writerow(gameData)\n\n else:\n self.writeToXbox360Table.writerow(gameData)\n\n\nclass TrueAchievementsScrape:\n pass\n\n\nclass HowLongToBeatScrape:\n pass\n\nclass MetaCriticScrape:\n pass\n\nif __name__ == '__main__':\n MajorNelsonScrape()\n csvHandler.main()\n DWG_BOT.main()\n endTime = time()\n endTime = (float(f'{(endTime - startTime) / 60}'))\n print(f'\\nTime elapsed: {endTime:.2f}')\n print('Success!')\n" }, { 
"alpha_fraction": 0.6080066561698914, "alphanum_fraction": 0.6538782119750977, "avg_line_length": 21.641510009765625, "blob_id": "1f8a5d0e64a9dd3c028fe9935a2fc1fd930a4578", "content_id": "4f7d53f16961cc344e5b75610e75460feb5f4a38", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1199, "license_type": "no_license", "max_line_length": 65, "num_lines": 53, "path": "/csvHandler.py", "repo_name": "MaxBranvall/DWG_Bot", "src_encoding": "UTF-8", "text": "import csv\nimport scraper, csvToMdTable\n\nopenXboxOne = open(scraper.xboxOneTablePath, 'w')\nopenXbox360 = open(scraper.xbox360TablePath, 'w')\n\nwriteToXboxOne = csv.writer(openXboxOne)\nwriteToXbox360 = csv.writer(openXbox360)\n\ndef main():\n\n xboxOneList = []\n xbox360List = []\n\n readXboxOne = csv.reader(open(scraper.xboxOneTablePath, 'r'))\n readXbox360 = csv.reader(open(scraper.xbox360TablePath, 'r'))\n\n for row in readXboxOne:\n xboxOneList.append(row)\n\n for row in readXbox360:\n xbox360List.append(row)\n\n sortLists(xboxOneList, xbox360List)\n\ndef sortLists(xOneList, x360List):\n\n xOneList[2:] = sorted(xOneList[2:])\n x360List[2:] = sorted(x360List[2:])\n\n scraper.Utility.clearFile(scraper.xboxOneTablePath)\n scraper.Utility.clearFile(scraper.xbox360TablePath)\n\n for line in xOneList:\n if line == []:\n pass\n else:\n writeToXboxOne.writerow(line)\n\n for line in x360List:\n if line == []:\n pass\n else:\n writeToXbox360.writerow(line)\n\n openXboxOne.close()\n openXbox360.close()\n\n scraper.Utility.getGamePrice()\n csvToMdTable.main()\n\nif __name__ == '__main__':\n main()" }, { "alpha_fraction": 0.6796875, "alphanum_fraction": 0.68359375, "avg_line_length": 12.421052932739258, "blob_id": "5e403520db85abcab6c2ffeee4296b14e3c0306a", "content_id": "8c8067ded5cf3ecf860d18a374a3c036d3a83189", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 256, "license_type": "no_license", "max_line_length": 40, "num_lines": 19, "path": "/testFiles/testCsvToMd.py", "repo_name": "MaxBranvall/DWG_Bot", "src_encoding": "UTF-8", "text": "import csv\nimport csvToMdTable\n\ncsvFile = 'csvTable.csv'\ncsvRead = csv.reader(open(csvFile, 'r'))\nmdFile = 'mdTest.md'\n\nmdOpen = open(mdFile, 'w')\n\ni = 0\n\nfor row in csvRead:\n\n print(row)\n\nmdOpen.close()\n# csvRead.close()\n\n# csvToMdTable.main(csvFile)\n\n" } ]
11
rplanel/species-clustering
https://github.com/rplanel/species-clustering
eeecb7db46f8214a455d421f4eb05e93302e5f4d
5f43e0b412bb851e344e00674e701fd218818810
69d7fd2f75f919da7cd35eb69556cd55f2ced8bd
refs/heads/master
2021-06-22T19:05:49.966986
2017-08-21T13:07:58
2017-08-21T13:07:58
99,905,254
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.4286971688270569, "alphanum_fraction": 0.46258804202079773, "avg_line_length": 28.115385055541992, "blob_id": "580887bee4811d41cd6f423ce5b1f62f5544603b", "content_id": "1dc0b6fe67cb28af820b802182de6ebc667f0920", "detected_licenses": [], "is_generated": false, "is_vendor": true, "language": "Python", "length_bytes": 4544, "license_type": "no_license", "max_line_length": 150, "num_lines": 156, "path": "/scripts/external/compareClustering.py", "repo_name": "rplanel/species-clustering", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n\nimport os\nimport argparse\nimport sys\n\nparser = argparse.ArgumentParser()\n# parser.add_argument(\n# \"-e\", \"--edges\",\n# nargs='?',\n# type = argparse.FileType('r'),\n# default=sys.stdin,\n# help = \"File that contains the distances between each genomes (on distance per-line)\"\n# )\n\n# parser.add_argument(\n# \"-o\", \"--output\",\n# nargs='?',\n# type = argparse.FileType('w'),\n# help = \"File that contains the distances between each genomes (on distance per-line)\"\n# )\n\n# parser.add_argument(\n# '-w', '--weight',\n# help='Take into account the link weight',\n# action='store_true'\n# )\n\n# args = parser.parse_args()\n\n\nout_dir = './metric-results'\nout_basename = out_dir + '/clustering-similarity-result'\n#sketch_sizes = ['21-1000', '21-50000', '16-50000', '19-50000', '25-50000']\nsketch_sizes = [\n # '7-5000',\n # '6-5000',\n # '8-5000', \n # '16-1000',\n # '16-50000',\n # '17-50000',\n '18-1000',\n '18-10000',\n '18-5000',\n '18-50000',\n # '19-1000',\n # '19-50000',\n '21-1000',\n '21-50000',\n '21-5000',\n '21-10000',\n # '25-1000',\n # '25-50000',\n]\n\n\nif not os.path.exists(out_dir):\n os.makedirs(out_dir)\n\n\n#cluster_methods = ['silix', 'louvain', 'weighted_louvain','infomap']\ncluster_methods = [\n 'louvain',\n # 'weighted_louvain',\n 'silix',\n # 'infomap'\n]\n\nversus = [\n {'vs' : 'rank' ,'rank': 'species' },\n {'vs' : 'rank' ,'rank': 'genus' },\n {'vs' : 'rank' ,'rank': 'family' },\n {'vs' : 'rank' ,'rank': 'order' },\n {'vs' : 'progenome','rank': 'cluster'}\n]\n\n\nfor metric in [\"variation-of-information\"]:\n for vs in versus:\n results = dict()\n distances = []\n header = ['distance', 'metric', 'clustering-method', 'sketch-size']\n\n fh_o = open(out_basename + '-'+ metric + '-' + vs['rank']+'.tsv', 'w')\n fh_o.write('\\t'.join(header) + \"\\n\")\n\n for size in sketch_sizes:\n for method in cluster_methods:\n fh = open(size + '/'+ metric+ '/' + method + '-vs-' + vs['vs'] + '-'+ metric +'-' + vs['rank'] + '-0.tsv', 'r')\n fh.readline()\n dico = dict()\n\n for line in fh:\n line_tr = line.strip()\n columns = line_tr.split(' ')\n columns.append(method)\n columns.append(size)\n fh_o.write('\\t'.join(columns) + \"\\n\")\n \n\n\nfor metric in []: #[\"split-join\"]:\n for vs in versus:\n results = dict()\n distances = []\n header = ['distance', 'metric', 'clustering-method', 'sketch-size']\n\n fh_o = open(out_basename + '-'+ metric + '-' + vs['rank']+'.tsv', 'w')\n fh_o.write('\\t'.join(header) + \"\\n\")\n\n for size in sketch_sizes:\n for method in cluster_methods:\n fh = open(size + '/'+ metric+ '/' + method + '-vs-' + vs['vs'] + '-'+ metric +'-' + vs['rank'] + '.csv', 'r')\n fh.readline()\n dico = dict()\n\n for line in fh:\n line_tr = line.strip()\n columns = line_tr.split(' ')\n if len(columns) == 4:\n columns.pop(1)\n columns.pop(1)\n columns.append(method)\n columns.append(size)\n fh_o.write('\\t'.join(columns) + \"\\n\")\n \n\n\n\n \nmetric='rand-index'\nfor vs in versus:\n results = 
dict()\n distances = []\n header = ['distance', 'metric', 'clustering-method', 'sketch-size']\n\n fh_o = open(out_basename+ '-' + metric + '-' + vs['rank']+'.tsv', 'w')\n fh_o.write('\\t'.join(header) + \"\\n\")\n \n for size in sketch_sizes:\n for method in cluster_methods:\n if vs['rank'] == 'cluster':\n fh = open(size + '/'+ metric+ '/vs-' + vs['vs'] + '/vs-' + vs['vs'] + '-rand-indexes-' + method + '-0.tsv', 'r')\n else:\n fh = open(size + '/'+ metric+ '/vs-' + vs['vs'] + '/csv/' + method + '-vs-' + vs['vs'] + '-rand-indexes-' + vs['rank'] + '.csv', 'r')\n \n fh.readline()\n dico = dict()\n\n for line in fh:\n line_tr = line.strip()\n columns = line_tr.split(' ')\n line_res = [columns[0], columns[3]]\n line_res.append(method)\n line_res.append(size)\n fh_o.write('\\t'.join(line_res) + \"\\n\")\n\n\n" }, { "alpha_fraction": 0.6040403842926025, "alphanum_fraction": 0.6080808043479919, "avg_line_length": 20.799999237060547, "blob_id": "0b96f06ddabab84d94d0dca35a2abdca25246203", "content_id": "c79d895b122b8c989186c330863d99ebdf2a8d15", "detected_licenses": [], "is_generated": false, "is_vendor": true, "language": "Python", "length_bytes": 990, "license_type": "no_license", "max_line_length": 89, "num_lines": 45, "path": "/scripts/external/infomap-output-to-originalid.py", "repo_name": "rplanel/species-clustering", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n\nimport argparse\nimport sys\nimport os.path\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\n \"-c\", \"--clustering\",\n nargs='?',\n type = argparse.FileType('r'),\n default=sys.stdin,\n help = \"File that contains the distances between each genomes (on distance per-line)\"\n)\n\nparser.add_argument(\n \"-d\", \"--dico\",\n help = \"\"\n)\n\nparser.add_argument(\n \"-o\", \"--output\",\n nargs='?',\n type = argparse.FileType('w'),\n help = \"File that contains the distances between each genomes (on distance per-line)\"\n)\n\nargs = parser.parse_args()\n\n\n\nnew_to_old = dict()\n\n\ndico_fh = open(args.dico, 'r')\nfor line in dico_fh:\n line_tr = line.strip()\n columns = line_tr.split(\"\\t\")\n new_to_old[columns[0]] = columns[1]\n\nfor line in args.clustering:\n if not line.startswith(\"#\"):\n line_tr = line.strip()\n columns = line_tr.split(\" \")\n args.output.write(str(columns[1]) + '\\t' + str(new_to_old[columns[0]]) + '\\n')\n \n" }, { "alpha_fraction": 0.48654353618621826, "alphanum_fraction": 0.5068601369857788, "avg_line_length": 29.31999969482422, "blob_id": "886968d5acf1586fa608e41356ac03b11132b3e6", "content_id": "fca35ec4226ab21601c2bcbe3edf84c9c4f8266c", "detected_licenses": [], "is_generated": false, "is_vendor": true, "language": "Python", "length_bytes": 3790, "license_type": "no_license", "max_line_length": 231, "num_lines": 125, "path": "/scripts/external/compare-distance-vibrio-coli.py", "repo_name": "rplanel/species-clustering", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n\nimport sys\nimport re\nfrom functools import reduce\nfrom pprint import pprint\n\ndico_annotation = [\n {\n 'name' : 'vibrio',\n 'collection': set(),\n 'pattern' : re.compile(r\"7\\t+\\d+\\.\\w+\\t+Vibrio\"),\n 'min': 0,\n 'max': 0,\n },\n {\n 'name' : 'not_vibrio',\n 'collection': set(),\n 'pattern' : re.compile(r\"7\\t+\\d+\\.\\w+\\t+[^Vibrio]\"),\n 'min': 0,\n 'max': 0,\n },\n\n # {\n # 'name' : 'escherichia_coli',\n # 'collection': set(),\n # 'pattern' : re.compile(r\"7\\t+\\d+\\.\\w+\\t+Escherichia coli\"),\n # 'min': 0,\n # 'max': 0,\n # },\n # {\n # 'name' : 'klebsiella',\n # 'collection': 
set(),\n # 'pattern' : re.compile(r\"7\\t+\\d+\\.\\w+\\t+.+Klebsiella\"),\n # 'min': 0,\n # 'max': 0,\n # },\n\n # 'vibrio_other' : {\n # 'collection': set()\n # },\n];\ndir_out = r'/home/rplanel/test/vibrio-coli'\ntaxo_patterns = []\n\nannotations_file = open(sys.argv[1], 'r')\ndistance_matrix_file = open(sys.argv[2], 'r')\nfiltered_distance_matrix = open(dir_out+'/out.txt', 'w')\nfiltered_annotation = open(dir_out+'/out-table.tsv', 'w')\n\n#7 633.PRJNA243530 Yersinia pseudotuberculosis 633 Yersinia pseudotuberculosis 629 Yersinia 1903411 Yersiniaceae 91347 Enterobacterales 1236 Gammaproteobacteria 1224 Proteobacteria\n\n\nfiltered_distance_matrix.write(\"node1\\tnode2\\tdistance\\tevalue\\tscore\\n\")\nfiltered_annotation.write(\"node_id\\tstrain_name\\tspecies_taxid\\tspecies\\tgenus_taxid\\tgenus\\tfamily_taxid\\tfamily\\torder_taxid\\torder\\tclass_taxid\\tclass\\tphylum_taxid\\tphylum\\n\")\n\nfor l in annotations_file:\n for value in dico_annotation:\n re_pattern = value['pattern']\n if re_pattern.match(l):\n list_line = l.split(\"\\t\")\n value['collection'].add(list_line[1])\n\nfiltered_annotation.close()\n\nmax = len(dico_annotation)\nmin = 0\ncombination = []\nfor i in range(max):\n for j in range(max):\n if i > j:\n combination.append(int(str(i)+str(j)))\n else:\n continue\n\n## create links possibility\ndef create_edges(edges, num):\n separate_digit = list(map(int,str(num)))\n edges[num] = {\n 'name': dico_annotation[separate_digit[0]]['name'] + \"\\t\" + dico_annotation[separate_digit[1]]['name'],\n 'max' : 0,\n 'min' : 1,\n 'strains': [],\n 'links' : []\n }\n return edges\n\nedges = reduce(create_edges,combination, {})\n\n# Compare the collections\nfor l in distance_matrix_file:\n list_line = l.split(\"\\t\")\n taxids = [\n list_line[0],#.split(\".\")[0],\n list_line[1]#.split(\".\")[0]\n ]\n res = ''\n for taxid in taxids:\n for i, anno in enumerate(dico_annotation):\n if taxid in anno['collection']:\n res += str(i)\n\n if len(res) == 2:\n set_res = { int(res), int(res[::-1]) }\n for combi in combination:\n if combi in set_res:\n distance = float(list_line[2])\n if distance < 0.16:\n edges[combi]['links'].append({\n 'node1': list_line[0],\n 'node2': list_line[1],\n 'distance': distance\n })\n \n if distance < edges[combi]['min']:\n edges[combi]['min'] = distance\n edges[combi]['strains'] = [list_line[0],list_line[1]]\n # print(edges)\n # print(\"====================================\")\n\n\nprint(edges)\nfor k, edge in edges.items():\n for line in edge['links']:\n print(line['node1'] + \"\\t\" + line['node2'] + \"\\t\" + str(line['distance']))\n" }, { "alpha_fraction": 0.623501181602478, "alphanum_fraction": 0.6559951901435852, "avg_line_length": 67.35246276855469, "blob_id": "a2f5666c6bc541ed3de0f6d46cd8c349100bc646", "content_id": "58d4cf970e5ff9aee435f10dbf27aa9d21926dd4", "detected_licenses": [], "is_generated": false, "is_vendor": true, "language": "R", "length_bytes": 8340, "license_type": "no_license", "max_line_length": 628, "num_lines": 122, "path": "/scripts/external/plot-variation-of-information.R", "repo_name": "rplanel/species-clustering", "src_encoding": "UTF-8", "text": "library(ggplot2)\nlibrary(ggrepel)\n\n\n## files <- c(\"clustering-metrix-res-cluster.tsv\",\"clustering-metrix-res-species.tsv\",\"clustering-metrix-res-genus.tsv\")\n## files <- c(\"clustering-metrix-res-species.tsv\",\"clustering-metrix-res-genus.tsv\",\"clustering-metrix-res-family.tsv\",\"clustering-metrix-res-order.tsv\")\n\n## progenome\nfiles <- c(\n 
\"./metric-results/clustering-similarity-result-rand-index-family.tsv\",\n \"./metric-results/clustering-similarity-result-rand-index-order.tsv\",\n \"./metric-results/clustering-similarity-result-variation-of-information-family.tsv\",\n \"./metric-results/clustering-similarity-result-variation-of-information-order.tsv\",\n \"./metric-results/clustering-similarity-result-rand-index-genus.tsv\",\n \"./metric-results/clustering-similarity-result-rand-index-species.tsv\",\n \"./metric-results/clustering-similarity-result-variation-of-information-genus.tsv\",\n \"./metric-results/clustering-similarity-result-variation-of-information-species.tsv\",\n \"./metric-results/clustering-similarity-result-variation-of-information-cluster.tsv\",\n \"./metric-results/clustering-similarity-result-rand-index-cluster.tsv\"\n)\n\n\n## microscope\n## files <- c(\n## ## \"./metric-results/clustering-similarity-result-rand-index-family.tsv\",\n## ## \"./metric-results/clustering-similarity-result-rand-index-order.tsv\",\n## ## \"./metric-results/clustering-similarity-result-variation-of-information-family.tsv\",\n## ## \"./metric-results/clustering-similarity-result-variation-of-information-order.tsv\",\n## \"./metric-results/clustering-similarity-result-rand-index-genus.tsv\",\n## \"./metric-results/clustering-similarity-result-rand-index-species.tsv\",\n## \"./metric-results/clustering-similarity-result-variation-of-information-genus.tsv\",\n## \"./metric-results/clustering-similarity-result-variation-of-information-species.tsv\",\n## \"./metric-results/clustering-similarity-result-split-join-genus.tsv\",\n## \"./metric-results/clustering-similarity-result-split-join-species.tsv\"\n\n \n## )\n\n\n\nfor(f in files){\n \n clustering <- read.csv(f, sep = \"\\t\", header = TRUE)\n\n ## Geom all\n outFile <- paste(f,'pdf', sep = \".\")\n min_point <- clustering[which.min(clustering$metric),]\n max_point <- clustering[which.max(clustering$metric),]\n\n lineD <- ggplot(\n clustering,\n aes_string(x=\"distance\", y = \"metric\", linetype=\"clustering.method\", colour=\"sketch.size\")\n ) + geom_line(size = 0.2) + geom_point(size=0.5) + geom_point(data = min_point, aes_string(x=\"distance\",y=\"metric\"), color = \"black\", size = 0.5) + geom_text(vjust=2, data = min_point, size = 1.2, aes(label = paste(sketch.size, \" (\",distance, \" ; \",round(metric, digits = 4), \")\", sep=\"\"))) + theme(text = element_text(size=4)) + geom_point(data = max_point, aes_string(x=\"distance\",y=\"metric\"), color = \"black\", size = 0.5) + geom_text(vjust=2, data = max_point, size = 1.2, aes(label = paste(sketch.size, \" (\",distance, \" ; \",round(metric, digits = 4), \")\", sep=\"\"))) + theme(text = element_text(size=4)) + ggtitle(f)\n ggsave(outFile, plot = lineD)\n\n\n\n\n\n ## Violin all\n outFileViolin <- paste(f,'violin','pdf', sep = \".\")\n clustering_filtered <- subset(clustering, distance <= 0.07 & distance > 0.02)\n violinDraw <- ggplot(clustering_filtered, aes(x = distance, y = metric)) + geom_violin(trim=FALSE, aes(group = distance), draw_quantiles = c(0.25, 0.5, 0.75)) + geom_point(aes_string(colour=\"sketch.size\", shape=\"clustering.method\", stroke=FALSE))\n ggsave(outFileViolin, plot = violinDraw)\n\n\n\n\n\n\n ## 21* louvain\n outFile21 <- paste(f,'21','pdf', sep = \".\")\n clustering_21_louvain <- subset(clustering, ( clustering.method == \"louvain\" | clustering.method == \"silix\" ) & (sketch.size == \"21-1000\" | sketch.size == \"21-5000\" | sketch.size == \"21-10000\" | sketch.size == \"21-50000\"))\n 
min_point <- clustering_21_louvain[which.min(clustering$metric),]\n max_point <- clustering_21_louvain[which.max(clustering$metric),]\n louvain_21 <- ggplot(\n clustering_21_louvain,\n aes_string(x=\"distance\", y = \"metric\", linetype=\"clustering.method\", colour=\"sketch.size\")\n ) + geom_line(size = 0.2) + geom_point(size=0.5) + geom_point(data = min_point, aes_string(x=\"distance\",y=\"metric\"), color = \"black\", size = 0.5) + geom_text(vjust=2, data = min_point, size = 1.2, aes(label = paste(sketch.size, \" (\",distance, \" ; \",round(metric, digits = 4), \")\", sep=\"\"))) + theme(text = element_text(size=4)) + geom_point(data = max_point, aes_string(x=\"distance\",y=\"metric\"), color = \"black\", size = 0.5) + geom_text(vjust=2, data = max_point, size = 1.2, aes(label = paste(sketch.size, \" (\",distance, \" ; \",round(metric, digits = 4), \")\", sep=\"\"))) + theme(text = element_text(size=4)) + ggtitle(f)\n ggsave(outFile21, plot = louvain_21)\n\n\n ## 21* louvain violin\n outFileViolin <- paste(f,'violin', '21', 'pdf', sep = \".\")\n clustering_21_louvain_subset <- subset(clustering_21_louvain, distance <= 0.1 & distance >= 0.03)\n violinDraw <- ggplot(clustering_21_louvain_subset, aes(x = distance, y = metric)) + geom_violin(trim=FALSE, aes(group = distance), draw_quantiles = c(0.25, 0.5, 0.75)) + geom_point(aes_string(colour=\"sketch.size\", shape=\"clustering.method\", stroke=FALSE))\n ggsave(outFileViolin, plot = violinDraw)\n\n\n ## 18* louvain\n outFile18 <- paste(f,'18','pdf', sep = \".\")\n clustering_18_louvain <- subset(clustering, ( clustering.method == \"louvain\" | clustering.method == \"silix\" ) & (sketch.size == \"18-1000\" | sketch.size == \"18-5000\" | sketch.size == \"18-10000\" | sketch.size == \"18-50000\"))\n min_point <- clustering_18_louvain[which.min(clustering$metric),]\n max_point <- clustering_18_louvain[which.max(clustering$metric),]\n louvain_18 <- ggplot(\n clustering_18_louvain,\n aes_string(x=\"distance\", y = \"metric\", linetype=\"clustering.method\", colour=\"sketch.size\")\n ) + geom_line(size = 0.2) + geom_point(size=0.5) + geom_point(data = min_point, aes_string(x=\"distance\",y=\"metric\"), color = \"black\", size = 0.5) + geom_text(vjust=2, data = min_point, size = 1.2, aes(label = paste(sketch.size, \" (\",distance, \" ; \",round(metric, digits = 4), \")\", sep=\"\"))) + theme(text = element_text(size=4)) + geom_point(data = max_point, aes_string(x=\"distance\",y=\"metric\"), color = \"black\", size = 0.5) + geom_text(vjust=2, data = max_point, size = 1.2, aes(label = paste(sketch.size, \" (\",distance, \" ; \",round(metric, digits = 4), \")\", sep=\"\"))) + theme(text = element_text(size=4)) + ggtitle(f)\n ggsave(outFile18, plot = louvain_18)\n\n\n ## 18* louvain violin\n outFileViolin <- paste(f,'violin', '18', 'pdf', sep = \".\")\n clustering_18_louvain_subset <- subset(clustering_18_louvain, distance <= 0.1 & distance >= 0.03)\n violinDraw <- ggplot(clustering_18_louvain_subset, aes(x = distance, y = metric)) + geom_violin(trim=FALSE, aes(group = distance), draw_quantiles = c(0.25, 0.5, 0.75)) + geom_point(aes_string(colour=\"sketch.size\", shape=\"clustering.method\", stroke=FALSE))\n ggsave(outFileViolin, plot = violinDraw)\n\n\n \n\n ## 18* louvain\n ## outFile18 <- paste(f,'18','pdf', sep = \".\")\n ## clustering_18_louvain <- subset(clustering, clustering.method == \"louvain\" & (sketch.size == \"18-1000\" | sketch.size == \"18-5000\" | sketch.size == \"18-10000\" | sketch.size == \"18-50000\"))\n ## min_point <- 
clustering_18_louvain[which.min(clustering$metric),]\n ## max_point <- clustering_18_louvain[which.max(clustering$metric),]\n ## louvain_18 <- ggplot(\n ## clustering_18_louvain,\n ## aes_string(x=\"distance\", y = \"metric\", linetype=\"clustering.method\", colour=\"sketch.size\")\n ## ) + geom_line(size = 0.2) + geom_point(size=0.5) + geom_point(data = min_point, aes_string(x=\"distance\",y=\"metric\"), color = \"black\", size = 0.5) + geom_text(vjust=2, data = min_point, size = 1.2, aes(label = paste(sketch.size, \" (\",distance, \" ; \",round(metric, digits = 4), \")\", sep=\"\"))) + theme(text = element_text(size=4)) + geom_point(data = max_point, aes_string(x=\"distance\",y=\"metric\"), color = \"black\", size = 0.5) + geom_text(vjust=2, data = max_point, size = 1.2, aes(label = paste(sketch.size, \" (\",distance, \" ; \",round(metric, digits = 4), \")\", sep=\"\"))) + theme(text = element_text(size=4)) + ggtitle(f)\n ## ggsave(outFile18, plot = louvain_18)\n\n}\n\n" }, { "alpha_fraction": 0.6118881106376648, "alphanum_fraction": 0.632867157459259, "avg_line_length": 56.20000076293945, "blob_id": "920c004a857bc5e9ef868e55e6a101877467ffeb", "content_id": "70546fb6cecd69989ab9528af68f2556b6396aa9", "detected_licenses": [], "is_generated": false, "is_vendor": true, "language": "Shell", "length_bytes": 286, "license_type": "no_license", "max_line_length": 118, "num_lines": 5, "path": "/scripts/external/archive-trees.sh", "repo_name": "rplanel/species-clustering", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\n## Se mettre dans le dossier na (faire un lien symbolique du script)\nfind ./ -maxdepth 5 -type d -name trees | cut -d '/' -f1,2,3,4,5 > list-tree-dir.txt\njobify -- 'for f in `cat list-tree-dir.txt`; do echo $f; tar --remove-files -cf \"${f}/trees.tar\" \"${f}/trees/\" ; done'\n" }, { "alpha_fraction": 0.6246006488800049, "alphanum_fraction": 0.6285942196846008, "avg_line_length": 23.54901885986328, "blob_id": "ec55709dd72837909051e9ffe79a2c5d36f85430", "content_id": "f6002d75ef3d353b4233018340d0ca09bc323e86", "detected_licenses": [], "is_generated": false, "is_vendor": true, "language": "Python", "length_bytes": 1252, "license_type": "no_license", "max_line_length": 91, "num_lines": 51, "path": "/scripts/external/convert-org-to-int-id.py", "repo_name": "rplanel/species-clustering", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n\nimport argparse\nimport sys\nimport os.path\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\n \"-e\", \"--edges\",\n nargs='?',\n type = argparse.FileType('r'),\n default=sys.stdin,\n help = \"File that contains the distances between each genomes (on distance per-line)\"\n)\n\n# parser.add_argument(\n# \"-o\", \"--output\",\n# # nargs='?',\n# # type = argparse.FileType('w'),\n# help = \"File that contains the distances between each genomes (on distance per-line)\"\n# )\n\nargs = parser.parse_args()\nbasename = os.path.splitext(os.path.basename(args.edges.name))[0]\n\n\noutputDico = basename + '-dico.tsv'\noutputEdges= basename + '-customid.tsv'\n\ndic_out = open(outputDico, 'w')\nedges_out = open(outputEdges, 'w')\n\n\nold_to_new_id = dict()\n\ndef set_id_to_node(node_id, dic):\n if node_id not in dic:\n dic[node_id] = str(len(dic))\n return dic[node_id]\n\nargs.edges.readline()\nfor line in args.edges:\n line_tr = line.strip()\n columns = line_tr.split(\"\\t\")\n columns[0] = set_id_to_node(columns[0],old_to_new_id)\n columns[1] = set_id_to_node(columns[1],old_to_new_id)\n edges_out.write(\"\\t\".join(columns)+\"\\n\")\n \n\nfor 
k,v in old_to_new_id.items():\n dic_out.write(k + \"\\t\" + v + \"\\n\")\n" }, { "alpha_fraction": 0.7318840622901917, "alphanum_fraction": 0.7318840622901917, "avg_line_length": 26.600000381469727, "blob_id": "c5bc06d81b815dae23e0a5e72a311ff07aec23a5", "content_id": "c38b962118b6011bfe4deea725f9a41681fd5c87", "detected_licenses": [], "is_generated": false, "is_vendor": true, "language": "Shell", "length_bytes": 138, "license_type": "no_license", "max_line_length": 82, "num_lines": 5, "path": "/scripts/external/visual_report/build-elm.sh", "repo_name": "rplanel/species-clustering", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\nelm-make Main.elm --warn --output elm.js\n\nelm-make src/modules/Rank.elm src/modules/MashTree.elm --warn --output mashTree.js\n" }, { "alpha_fraction": 0.6547433733940125, "alphanum_fraction": 0.6702954769134521, "avg_line_length": 21.172412872314453, "blob_id": "aff73cdb45c12941b21f7d4e5436bca812585144", "content_id": "5d5079fdaa1e10177d7c5726d17bf54f477b5408", "detected_licenses": [], "is_generated": false, "is_vendor": true, "language": "JavaScript", "length_bytes": 643, "license_type": "no_license", "max_line_length": 52, "num_lines": 29, "path": "/scripts/external/visual_report/tree.js", "repo_name": "rplanel/species-clustering", "src_encoding": "UTF-8", "text": "var node = document.getElementById('elm-container');\nvar app = Elm.MashTree.embed(node);\nclusterTree = clusterTree();\nconsole.log(window.location);\nvar urlA = window.location.href.split(/\\//);\nurlA.pop();\nvar url = urlA.join('/');\nconsole.log(url);\n\n\n\n//app.ports.clusterId.send(window.location.search);\n\napp.ports.url.send([url, window.location.search]);\n\napp.ports.calculateTree.subscribe(function(params){\n});\n\n\napp.ports.drawTree.subscribe(function(params){\n console.log(params);\n var tree = params[0];\n var rank = params[1];\n \n d3.select('g.tree')\n .datum(tree)\n .call(clusterTree, 1500, 500, rank);\n \n});\n" }, { "alpha_fraction": 0.7346437573432922, "alphanum_fraction": 0.7346437573432922, "avg_line_length": 57.14285659790039, "blob_id": "2f2227e519911576f83322e6fb95747820ed5cf5", "content_id": "8dbea26476031cffe6f737abb024a2de7f8217ea", "detected_licenses": [], "is_generated": false, "is_vendor": true, "language": "Shell", "length_bytes": 407, "license_type": "no_license", "max_line_length": 241, "num_lines": 7, "path": "/scripts/external/concatenateProgenomeRandIndexNoSingleton.sh", "repo_name": "rplanel/species-clustering", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\nout=\"/env/cns/home/rplanel/my_proj/test/mash/data/rand-index/progenome/rand-indexes-no-singleton.csv\"\n\necho -e \"distance Rand HA MA FM Jaccard\" > $out\n\nfind /env/cns/home/rplanel/my_proj/test/mash/data/runs/progenome/na/one-nextflow/ -name work -prune -o -name distance-matrices -prune -o -name graph -prune -o -name trees -prune -o -name rand-index-no-singleton.csv -print | xargs cat >> $out\n" }, { "alpha_fraction": 0.6241922974586487, "alphanum_fraction": 0.6332385540008545, "avg_line_length": 48.57692337036133, "blob_id": "cc83d856e312c06e006a4d9eac391649eb3ef45e", "content_id": "63c00aa426fed8afab7ee56becbae7026062172d", "detected_licenses": [], "is_generated": false, "is_vendor": true, "language": "R", "length_bytes": 3869, "license_type": "no_license", "max_line_length": 231, "num_lines": 78, "path": "/scripts/external/rand-index-plot.R", "repo_name": "rplanel/species-clustering", "src_encoding": "UTF-8", "text": "library(ggplot2)\nlibrary(reshape)\nlibrary(dplyr)\n\nparams = 
commandArgs(trailingOnly=TRUE)\nargs <- strsplit(params, \" \")\nfileRandIndex <- args[[1]]\noutFile <- args[[2]]\n\n\nrand_indexes = read.csv(fileRandIndex, sep=\" \",dec=\".\")\nrand_indexes_melt <- melt(rand_indexes, id = c(\"distance\"))\nrand_indexes_melt %>% group_by(variable) %>% dplyr::summarise(min = min(value), max = max(value)) -> rand_indexes_melt.2\nleft_join(rand_indexes_melt, rand_indexes_melt.2) %>% mutate(color = value == min | value == max) %>% filter(color == TRUE) -> rand_indexes_melt.3\nggplot(rand_indexes_melt, aes(x = distance, y = value, colour = variable)) + geom_line() + geom_point(data=rand_indexes_melt.3, aes(x=distance, y = value)) + geom_text(data=rand_indexes_melt.3, aes(label=distance),hjust=0, vjust=0)\nggsave(outFile)\n##genus <- subset(rand_indexes, taxon == \"genus\")\n##\n## + geom_point(data=speciesJoin.3, aes(x=distance, y = value))\n## for (rank in c(\"species\", \"genus\")) {\n## taxa <- subset(rand_indexes, taxon == rank)\n## ##taxa <- subset(rand_indexes, taxon == taxon)\n## for (randMethod in c(\"Rand\",\"HA\", \"MA\", \"FM\", \"Jaccard\") ) {\n## taxaMethod <- subset(taxa, select = c(\"seqType\", \"distance\", randMethod))\n## taxaMethodJoin <- melt(taxaMethod, id = c(\"distance\", \"seqType\"))\n## taxaMethodJoin %>% group_by(variable) %>% summarise(min = min(value), max = max(value)) -> taxaMethodJoin.2\n## left_join(taxaMethodJoin, taxaMethodJoin.2) %>% mutate(color = value == min | value == max) %>% filter(color == TRUE) -> taxaMethodJoin.3\n## ggplot(taxaMethodJoin, aes(x = distance, y = value, colour = seqType)) + geom_line() + ggtitle(paste(rank, randMethod)) + geom_point(data=taxaMethodJoin.3, aes(x=distance, y = value))\n## out <- paste(rank, \"-rand-\", randMethod,\".pdf\", sep=\"\")\n## ggsave(out)\n## }\n## }\n\n## speciesMA <- subset(species, select = c(\"seqType\", \"distance\", \"MA\"))\n## ##speciesNA <- subset(species, seqType == \"na\")\n## ##speciesAA <- subset(species, seqType == \"aa\")\n\n\n## ## species[1] <- NULL\n## ## species[2] <- NULL\n## ## genus[2] <- NULL\n\n## ## genus[1] <- NULL\n\n## ## speciesAA[1] <- NULL\n## ## speciesAA[2] <- NULL\n## ## speciesAAJoin <- melt(species, id = c(\"distance\"))\n## ## speciesJoin <- melt(species, id = c(\"seqType\", \"distance\"))\n## ##genusJoin <- melt(genus, id = \"distance\")\n\n\n## speciesMAJoin <- melt(speciesMA, id = c(\"distance\", \"seqType\"))\n\n## ggplot(speciesMAJoin, aes(x = distance, y = value, colour = seqType)) + geom_line()\n## ggsave(\"species-rand.pdf\")\n\n\n##speciesMAJoin %>% group_by(variable) %>% summarise(min = min(value), max = max(value)) -> speciesMAJoin.2\n## left_join(speciesMAJoin, speciesMAJoin.2) %>% mutate(color = value == min | value == max) %>% filter(color == TRUE) -> speciesMAJoin.3\n\n\n## speciesJoin %>% group_by(variable) %>% summarise(min = min(value), max = max(value)) -> speciesJoin.2\n## left_join(speciesJoin, speciesJoin.2) %>% mutate(color = value == min | value == max) %>% filter(color == TRUE) -> speciesJoin.3\n\n\n## speciesAAJoin %>% group_by(variable) %>% summarise(min = min(value), max = max(value)) -> speciesAAJoin.2\n## left_join(speciesAAJoin, speciesAAJoin.2) %>% mutate(color = value == min | value == max) %>% filter(color == TRUE) -> speciesAAJoin.3\n\n\n\n\n## ggplot(speciesJoin, aes(x = distance, y = value, colour = variable)) + geom_line() + geom_point(data=speciesJoin.3, aes(x=distance, y = value))\n## ggsave(\"species-rand.pdf\")\n\n## genusJoin %>% group_by(variable) %>% summarise(min = min(value), max = 
max(value)) -> genusJoin.2\n## left_join(genusJoin, genusJoin.2) %>% mutate(color = value == min | value == max) %>% filter(color == TRUE) -> genusJoin.3\n## ggplot(genusJoin, aes(x = distance, y = value, colour = variable)) + geom_line() + geom_point(data=genusJoin.3, aes(x=distance, y = value))\n## ggsave(\"genus-rand.pdf\")\n \n" }, { "alpha_fraction": 0.6216779351234436, "alphanum_fraction": 0.6331422328948975, "avg_line_length": 41.64444351196289, "blob_id": "74ef066689c7bea3e4eb0e1a1e73b73827de3241", "content_id": "eef1d288c1f165eba01e4c15c8e4a059430a7a9b", "detected_licenses": [], "is_generated": false, "is_vendor": true, "language": "Shell", "length_bytes": 1919, "license_type": "no_license", "max_line_length": 324, "num_lines": 45, "path": "/scripts/external/extractOrgSeq.sh", "repo_name": "rplanel/species-clustering", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\noidsFile=$1\n\n\nfor line in $(cat $oidsFile) \ndo \n mysql --max_allowed-packet=1G -ABNqr pkgdb_dev -e \\\n\t \"SELECT strtofastaudf(O_id,IF(C_id IS NULL,S_string,gotonucudf(S_string, C_begin, C_end, '+1'))) \n FROM Organism \n INNER JOIN Replicon USING(O_id) \n INNER JOIN Sequence USING(R_id) \n INNER JOIN Sequence_String USING(S_id) \n LEFT JOIN Contig USING(S_id) \n WHERE O_id = $line\n AND S_status = 'inProduction';\" | \\\n\n awk '$0==\"NULL\"{print \"Error: bad sequence extraction \" > \"/dev/stderr\"; exit 1} /^>/{ Oid=$1; sub(\">\",\"\",Oid); fileout=Oid\".fna.gz\";} {print $0 | \"gzip -c > \"fileout }'\n\ndone\n\n\n\n# mysql --max_allowed-packet=1G -ABNqr pkgdb_dev -e \"SELECT O_id, IF(C_id IS NULL,'no contig',gotonucudf(S_string,C_begin, C_end, '+1')) FROM Organism INNER JOIN Replicon USING(O_id) INNER JOIN Sequence USING(R_id) INNER JOIN Sequence_String USING(S_id) LEFT JOIN Contig USING(S_id) WHERE S_status = 'inProduction';\" > log\n\n\n\n\n# mysql -u$MYAGCUSER -p$MYAGCPASS -h$MYAGCHOST --max_allowed-packet=1G -ABNqr $MYAGCDB -e \"\n# SELECT strtofastaudf(O_id,IF(C_id IS NULL,S_string,gotonucudf(S_string, C_begin, C_end, '+1'))) \n# FROM Organism \n# INNER JOIN Replicon USING(O_id)\n# INNER JOIN Sequence USING(R_id) \n# INNER JOIN Sequence_String USING(S_id)\n# LEFT JOIN Contig USING(S_id) \n# WHERE S_status = 'inProduction' AND O_id IN (31,56,2751);\" | awk '/^>/{ Oid=$1; sub(\">\",\"\",Oid); fileout=Oid\".fna\"} {print $0 > fileout}'\n\n\n\n\n# mysql --max_allowed-packet=1G -ABNqr pkgdb_dev -e \\\n# \\\"SELECT strtofastaudf(CONCAT_WS(' ',O_id, O_name, name_txt),S_string) \\\n# FROM Organism LEFT JOIN O_Taxonomy USING(O_id) INNER JOIN Replicon USING(O_id) INNER JOIN Sequence USING(R_id) \\\n# INNER JOIN Sequence_String USING(S_id) \\\n# WHERE rank = 'order' AND S_status = 'inProduction' AND O_id=${oid}\\\" > ${filenameOut}\n" }, { "alpha_fraction": 0.6741405129432678, "alphanum_fraction": 0.687593400478363, "avg_line_length": 43.33333206176758, "blob_id": "a4601578a08281a08d88de8e4a9794f1ae8c6245", "content_id": "14e0f7a7ca025e342df770f82e2da0240e22b3a7", "detected_licenses": [], "is_generated": false, "is_vendor": true, "language": "Shell", "length_bytes": 669, "license_type": "no_license", "max_line_length": 243, "num_lines": 15, "path": "/scripts/external/concatenateMicroscopeRandIndex.sh", "repo_name": "rplanel/species-clustering", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\nout=\"/env/cns/home/rplanel/my_proj/test/mash/data/rand-index/microscope\"\n\nmodule load r/3.3.1\n\n\nfor taxa in \"species\" \"genus\" \"family\" \"order\" \"class\" \"phylum\"\ndo\n echo $taxa_out\n 
taxa_out=\"$out/${taxa}-rand-indexes.csv\"\n echo -e \"distance Rand HA MA FM Jaccard\" > $taxa_out \n find /env/cns/home/rplanel/my_proj/test/mash/data/runs/microscope/na/21-1000/ -name work -prune -o -name distance-matrices -prune -o -name graph -prune -o -name trees -prune -o -name \"rand-index-${taxa}.csv\" -print | xargs cat >> $taxa_out\n Rscript /env/cns/home/rplanel/my_proj/test/mash/scripts/rand-index-plot.R $taxa_out \"${taxa}-rand-index-plot.pdf\"\ndone\n\n\n\n\n" }, { "alpha_fraction": 0.5695921182632446, "alphanum_fraction": 0.5767614245414734, "avg_line_length": 24.764331817626953, "blob_id": "98f6cd526d19aa9347959e17d6331b778c59a590", "content_id": "174c1c97090a6155f0ec6bab646a8da286a8192f", "detected_licenses": [], "is_generated": false, "is_vendor": true, "language": "Python", "length_bytes": 4045, "license_type": "no_license", "max_line_length": 98, "num_lines": 157, "path": "/scripts/external/calculate-cliques.py", "repo_name": "rplanel/species-clustering", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n\nimport argparse\nimport sys\nimport os.path\nimport os.path\nimport fileinput\nimport gzip\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\n \"-e\", \"--edges\",\n nargs='?',\n type = argparse.FileType('r'),\n default=sys.stdin,\n help = \"File that contains the distances between each genomes (on distance per-line)\"\n)\n\nparser.add_argument(\n \"-o\", \"--output\",\n # nargs='?',\n # type = argparse.FileType('w'),\n help = \"File that contains the distances between each genomes (on distance per-line)\"\n)\n\n# parser.add_argument(\n# \"-ci\", \"--cluster_id\",\n# help = \"Cluster's identifier.\"\n# )\n\n\nargs = parser.parse_args()\n\n#os.path.basename(\"\")\n\n\nbasename = os.path.basename(args.edges.name).split('.')[0]\nprint(basename)\nqc_node_id_output = basename + '-qc-node-id.txt'\n\noutput_tmp = open(qc_node_id_output, 'w')\nold_to_new_id = dict()\ncount = 0\nlinks_count = 0\nlinks_set = set()\nmax_clique_group = set()\n\ndef merge(sets):\n \"\"\"Niklas B.\"\"\"\n merged = 1\n while merged:\n merged = 0\n results = []\n while sets:\n common, rest = sets[0], sets[1:]\n sets = []\n for x in rest:\n if x.isdisjoint(common):\n sets.append(x)\n else:\n merged = 1\n common |= x\n results.append(common)\n sets = results\n return sets\n\ndef che_merge(sets):\n \"\"\"ChessMaster\"\"\"\n results = []\n upd, isd, pop = set.update, set.isdisjoint, sets.pop\n while sets:\n if not [upd(sets[0],pop(i)) for i in range(len(sets)-1,0,-1) if not isd(sets[0],sets[i])]:\n results.append(pop(0))\n return results\n\n\ndef set_id_to_node(node_id, dic):\n if node_id not in dic:\n dic[node_id] = str(len(dic))\n return dic[node_id]\n\n\n# print(\"## Start read edges file\")\n\n# args.edges.readline()\n# for line in args.edges:\n# line_tr = line.strip()\n# columns = line_tr.split(\"\\t\")\n# db_ids = [columns[0], columns[1]]\n# new_edges = [ set_id_to_node(node, old_to_new_id) for node in db_ids];\n\n# ## qc needs to have v1,v2 and v2,v1 but not v1,v1.\n# ## I force the output to look like that and I record the links\n# if new_edges[0] != new_edges[1]:\n# link_id = \",\".join(sorted(new_edges))\n# if link_id not in links_set:\n# links_set.add(link_id)\n# for link in [ ','.join(new_edges), ','.join(reversed(new_edges)) ]:\n# output_tmp.write(link+\"\\n\")\n\n \n# output_tmp.close()\n# print(\"## End read edges file\")\n# #output = open(args.output, 'w')\n\n# lines_to_prepend = str(len(old_to_new_id)) + \"\\n\" + str(len(links_set) * 2)\n# f = 
fileinput.input(qc_node_id_output, inplace=True)\n# for line in f:\n#     line = line.strip()\n#     if f.isfirstline():\n#         print(lines_to_prepend.rstrip('\\r\\n') + '\\n' + line)\n#     else:\n#         print(line)\n\n# f.close()\n\n# print(\"Start the max clique calculation\")\n\n# os.system('qc --input-file=' + qc_node_id_output + ' --algorithm=hybrid > qc-cliques.output')\n\n# print(\"End the max clique calculation\")\n\nmax_cliques = open('qc-cliques.output', 'r')\n\n\nclique_sets = []\nline_number = 0\nfor line in max_cliques:\n    line_number += 1\n    if line_number >= 3:\n        if line_number == 3:\n            print(line)\n        line_tr = line.strip()\n        columns = line_tr.split(\" \")\n        clique_sets.append(set(columns))\n\n\n#print(clique_sets)\n#clique_groups = merge(clique_sets)\nprint('## Start merge')\nclique_groups = che_merge(clique_sets)\nprint(\"Nombre de clique group : \" + str(len(clique_groups)))\n\n# clique_file = open(args.output, 'w')\n\n# new_to_old_id = {v: k for k, v in old_to_new_id.items()}\n\n# for i, clique_group in enumerate(clique_groups):\n#     for genome in clique_group:\n#         clique_file.write(str(i)+\"\\t\"+ new_to_old_id[genome]+\"\\n\")\n\n    #open(args.cluster_id+'-'+str(i))\n    # for node in nodes:\n    #     print(str(i) + \"\\t\" + str(node))\n    \n\n#pickle.dump(progenome_to_id, open( \"node-id-to-int.dict\", \"wb\" ) )\n" }, { "alpha_fraction": 0.6118106842041016, "alphanum_fraction": 0.6196891069412231, "avg_line_length": 19.978260040283203, "blob_id": "88406b3307581f06245fa55f8a40c9238c73ab8d", "content_id": "7b4f2336a4ff8b69b1a3ea02674558838c3896c8", "detected_licenses": [], "is_generated": false, "is_vendor": true, "language": "Python", "length_bytes": 965, "license_type": "no_license", "max_line_length": 89, "num_lines": 46, "path": "/scripts/external/replace-custom-id.py", "repo_name": "rplanel/species-clustering", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n\nimport argparse\nimport sys\nimport os.path\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\n    \"-c\", \"--clusters\",\n    nargs='?',\n    type = argparse.FileType('r'),\n    default=sys.stdin,\n    help = \"File that contains the clustering to rewrite (one line per node)\"\n)\n\n\nparser.add_argument(\n    \"-d\", \"--dico\",\n    help = \"Tab-separated mapping file used to translate identifiers\"\n)\n\n\nparser.add_argument(\n    \"-o\", \"--output\",\n    nargs='?',\n    type = argparse.FileType('w'),\n    help = \"Output file for the clustering with translated identifiers\"\n)\n\nargs = parser.parse_args()\n\n\ndico = open(args.dico, 'r')\nid_dico = dict()\n\nfor line in dico:\n    line_tr = line.strip()\n    columns = line_tr.split(\"\\t\")\n    id_dico[columns[1]] = columns[2]\n\n## skip the header line, then translate each cluster line through the dico mapping\nargs.clusters.readline()\nfor line in args.clusters:\n    line_tr = line.strip()\n    columns = line_tr.split(\"\\t\")\n    res = [ columns[1], id_dico[columns[0]] ]\n    args.output.write(\"\\t\".join(res) + \"\\n\")\n" }, { "alpha_fraction": 0.6118106842041016, "alphanum_fraction": 0.6328427791595459, "avg_line_length": 21.90566062927246, "blob_id": "aa69aefb46e9b960616abf3e16c131edc6fb6b17", "content_id": "b65b11f610ae01815811591bb182f2bf885011a1", "detected_licenses": [], "is_generated": false, "is_vendor": true, "language": "JavaScript", "length_bytes": 2303, "license_type": "no_license", "max_line_length": 74, "num_lines": 104, "path": "/scripts/external/visual_report/index.js", "repo_name": "rplanel/species-clustering", "src_encoding": "UTF-8", "text": "// var noUiSlider = require('nouislider');\n// var d3 = require('d3');\n// var NJ = require('neighbor-joining');\n// Setup Elm app\nvar node = 
document.getElementById('cluster-form')\nvar app = Elm.Main.embed(node);\n\n// Initialize D3 component\nvar piechart = piechart();\nvar parameters = parameters();\nvar histogram = histogram();\n\n// Set parameters\nvar piechartRadius = 80;\nvar numColumn = 4;\nvar href = window.location.href;\nvar url = href.replace(/index.html/,'');\n\nconsole.log(url);\n\n\n// Get the data\n\nvar dataCluster = rawClusterData;\n\nvar taxoCluster = rawRankData;\n\n\nconsole.log(dataCluster);\n\n// var taxoClusterNull = taxoCluster.species.filter(function(d) {\n// return (d.name === null || d.name === \"\");\n// });\n// console.log(taxoClusterNull);\n\n\n// Create the slider\nvar slider = document.getElementById('slider-degre');\nnoUiSlider.create(slider, {\n start: [1,2],\n step: 1,\n tooltips: [ true, true],\n range: {\n\t'min': [0],\n\t'max': [10]\n },\n});\n\nslider.noUiSlider.on('change', function(range){\n app.ports.sliderChange.send({\n 'min' : parseInt(range[0]),\n 'max' : parseInt(range[1])\n });\n \n});\n\n\napp.ports.dataClusters.send({\n \"distanceClusters\" : dataCluster,\n \"taxonomicClusters\": taxoCluster,\n \"displayedClusters\" : undefined,\n \"parameters\" : [ parametersData ],\n});\n\napp.ports.sliderRange.subscribe(function(range){\n slider.noUiSlider.updateOptions({\n\trange: {\n\t 'min': range[0],\n\t 'max': range[1]\n\t}\n });\n});\n\napp.ports.sliderValue.subscribe(function(range){\n slider.noUiSlider.set(range);\n});\n\n\napp.ports.deletePies.subscribe(function(params){\n d3.selectAll('g.piecharts').remove()\n});\n\napp.ports.draw.subscribe(function(params) {\n var data = params[0];\n var histoData = params[1];\n d3\n .select('svg')\n .attr('width',function(d){\n return (numColumn * (piechartRadius+200)) + 100;\n })\n .attr('height',function(d){\n return ((data.length/numColumn) * (piechartRadius+150)) + 700;\n });\n\n d3.select('g.histogram').datum(histoData).call(histogram,500,500);\n \n d3.select('g.clusters')\n .attr('transform', \"translate(0,400)\")\n .datum(data)\n .call(piechart,piechartRadius,piechartRadius,numColumn, url);\n});\n\n\n// For the MashTree\n" }, { "alpha_fraction": 0.6177924275398254, "alphanum_fraction": 0.622734785079956, "avg_line_length": 21.90566062927246, "blob_id": "5943df9a789850fe9d6b5cdc77f3f8de168d11d3", "content_id": "40ad3492b281662ac47ec86dda7e90a348a6ee72", "detected_licenses": [], "is_generated": false, "is_vendor": true, "language": "Python", "length_bytes": 1214, "license_type": "no_license", "max_line_length": 91, "num_lines": 53, "path": "/scripts/external/merge-clustering-diff-per-rank.py", "repo_name": "rplanel/species-clustering", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n\nimport argparse\nimport sys\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\n \"-c\", \"--count\",\n nargs='?',\n type = argparse.FileType('r'),\n default=sys.stdin,\n help = \"File that contains the distances between each genomes (on distance per-line)\"\n)\n\n# parser.add_argument(\n# \"-o\", \"--output\",\n# nargs='?',\n# type = argparse.FileType('w'),\n# help = \"File that contains the distances between each genomes (on distance per-line)\"\n# )\n\n# parser.add_argument(\n# '-w', '--weight',\n# help='Take into account the link weight',\n# action='store_true'\n# )\n\nargs = parser.parse_args()\n\ndef getKey(item):\n return item[0]\n\n\ndico_per_rank = dict()\nargs.count.readline()\nfor line in args.count:\n line_tr = line.strip()\n columns = line_tr.split('\\t')\n rank_id = columns[3]\n if rank_id not in dico_per_rank:\n 
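# first occurrence of this rank id: initialise its [count, rank_id, rank_name] accumulator\n        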
dico_per_rank[rank_id] = [0, None, None]\n\n count = dico_per_rank[rank_id][0] + int(columns[0])\n dico_per_rank[rank_id] = [count, rank_id, columns[4]]\n\n\n\n\nsorted_line = sorted(dico_per_rank.values(), key=getKey,reverse=True)\n \nfor line in sorted_line:\n line_str = [str(l) for l in line]\n print('\\t'.join(line_str))\n" }, { "alpha_fraction": 0.5557987093925476, "alphanum_fraction": 0.5641137957572937, "avg_line_length": 23.836956024169922, "blob_id": "f38348430ba155355a8fc2128123367382ec5c01", "content_id": "944f4c2af1c571273d5ace16a193c51712d78781", "detected_licenses": [], "is_generated": false, "is_vendor": true, "language": "Python", "length_bytes": 2285, "license_type": "no_license", "max_line_length": 89, "num_lines": 92, "path": "/scripts/external/calculate-specificity-sensitivity.py", "repo_name": "rplanel/species-clustering", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n\nimport argparse\nimport sys\nimport os.path\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\n \"-c\", \"--clustering\",\n nargs='?',\n type = argparse.FileType('r'),\n default=sys.stdin,\n help = \"File that contains the clustering to compare\"\n)\n\nparser.add_argument(\n \"-o\", \"--output\",\n nargs='?',\n type = argparse.FileType('w'),\n help = \"File that contains the distances between each genomes (on distance per-line)\"\n)\n\n\ndef clustering_to_link(clustering) :\n truth_list = list()\n for i, item in enumerate(clustering, start=0):\n j = i + 1\n while j < len(clustering):\n if item == clustering[j]:\n truth_list.append(True)\n else:\n truth_list.append(False)\n j = j+1\n return truth_list\n\ndef table_truth(to_test, ref):\n \n table_truth_dic = {\n 'TP': 0.,\n 'FP': 0.,\n 'FN': 0.,\n 'TN': 0.,\n }\n zipped = zip(to_test, ref)\n # print(zipped)\n for it in zipped:\n if it[0] == it[1]:\n if it[1] == True: ## TP\n table_truth_dic['TP'] = table_truth_dic['TP'] + 1\n else:\n table_truth_dic['TN'] = table_truth_dic['TN'] + 1\n else:\n if it[1] == True: ## FN\n table_truth_dic['FN'] = table_truth_dic['FN'] + 1\n else:\n table_truth_dic['FP'] = table_truth_dic['FP'] + 1\n \n return table_truth_dic\n \nargs = parser.parse_args()\n\nclustering_to_estimate = []\nref_clustering = []\n\nfor line in args.clustering:\n line_tr = line.strip()\n columns = line_tr.split(\"\\t\")\n clustering_to_estimate.append(int(columns[0]))\n ref_clustering.append(int(columns[1]))\n\n\n\n\nto_test_link = clustering_to_link(clustering_to_estimate)\n# print(to_test_link)\nref_link = clustering_to_link(ref_clustering)\n# print(ref_link)\ntable_truth = table_truth(to_test_link, ref_link)\n# print(table_truth)\n\n\n\n\nsensitivity = table_truth['TP'] / (table_truth['TP'] + table_truth['FN'])\n\nspecificity = 1\nif (table_truth['TN'] + table_truth['FP']) != 0:\n specificity = table_truth['TN'] / (table_truth['TN'] + table_truth['FP'])\n\n\n# print(\"sensitivity\\tspecificity\")\nprint(str(sensitivity) + '\\t' + str(specificity))\n" }, { "alpha_fraction": 0.604619562625885, "alphanum_fraction": 0.6304348111152649, "avg_line_length": 39.55555725097656, "blob_id": "042469b8b9a662e77ad668723871f5e4a9144fe5", "content_id": "af8e44fe246a61fecf0b82cb1f0db25411194767", "detected_licenses": [], "is_generated": false, "is_vendor": true, "language": "SQL", "length_bytes": 736, "license_type": "no_license", "max_line_length": 76, "num_lines": 18, "path": "/scripts/external/createMashTables.sql", "repo_name": "rplanel/species-clustering", "src_encoding": "UTF-8", "text": "\n\n\nCREATE TABLE `MASH_param` (\n 
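-- one row per (distance, pvalue, kmer_size, sketch_size) parameter combination\n  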
`MASH_param_id` smallint(5) NOT NULL AUTO_INCREMENT,\n `distance` decimal(10,10) NOT NULL,\n `pvalue` double unsigned DEFAULT NULL,\n `kmer_size` int(11) DEFAULT NULL,\n `sketch_size` int(11) DEFAULT NULL,\n PRIMARY KEY (`MASH_param_id`),\n UNIQUE KEY `distance` (`distance`,`pvalue`,`kmer_size`,`sketch_size`)\n ) ENGINE=MyISAM AUTO_INCREMENT=1 DEFAULT CHARSET=latin1;\n\n\n\nCREATE TABLE `MASH_cluster` (\n `MASH_param_id` smallint(5) unsigned NOT NULL DEFAULT '0',\n `cluster_id` int(11) unsigned NOT NULL DEFAULT '0',\n `O_id` int(11) unsigned NOT NULL,\n PRIMARY KEY (`O_id`,`MASH_param_id`)\n ) ENGINE=MyISAM DEFAULT CHARSET=latin1;\n\n\n\n" }, { "alpha_fraction": 0.614844560623169, "alphanum_fraction": 0.6258776187896729, "avg_line_length": 24.564102172851562, "blob_id": "c64e2570922b0fd15809678414037626197b3e2e", "content_id": "2305c5d9cd2d313ba6e1e397c743c05257d5148f", "detected_licenses": [], "is_generated": false, "is_vendor": true, "language": "Shell", "length_bytes": 997, "license_type": "no_license", "max_line_length": 190, "num_lines": 39, "path": "/scripts/external/existsRecord.sh", "repo_name": "rplanel/species-clustering", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\n\ndistance=$1\npvalue=$2\nkmerSize=$3\nsketchSize=$4\nseqType=$5\n\nhasParams=`mysql GO_SPE -ABNre \"SELECT EXISTS(SELECT MASH_param_id FROM MASH_param WHERE distance = $distance AND pvalue = $pvalue AND kmer_size = $kmerSize AND sketch_size = $sketchSize);\"`\n\n\n\nif [ $hasParams -ne 0 ]\nthen\n res=`mysql GO_SPE -ABNre \"SELECT MASH_param_id FROM MASH_param WHERE distance = $distance AND pvalue = $pvalue AND kmer_size = $kmerSize AND sketch_size = $sketchSize;\"`\n echo $res\n exit 0\nelse\n mysql GO_SPE -ABNre \"INSERT INTO MASH_param (distance, pvalue, kmer_size, sketch_size) VALUES ($distance, $pvalue, $kmerSize, $sketchSize);\"\n \n if [ $? 
-eq 0 ]\n    then\n\tres=`mysql GO_SPE -ABNre \"\n        SELECT MASH_param_id \n        FROM MASH_param \n        WHERE distance = $distance \n        AND pvalue = $pvalue \n        AND kmer_size = $kmerSize \n        AND sketch_size = $sketchSize;\"`\n\t\n\techo $res\n\texit 0\n    else\n\texit 1\n    fi\nfi\n\nexit 0\n" }, { "alpha_fraction": 0.5977011322975159, "alphanum_fraction": 0.6091954112052917, "avg_line_length": 24.58823585510254, "blob_id": "f32c913993e0a7b948654870f6d4316aa99cf172", "content_id": "40215fbfa2dd2f3f111939176f9570234ff94644", "detected_licenses": [], "is_generated": false, "is_vendor": true, "language": "Python", "length_bytes": 435, "license_type": "no_license", "max_line_length": 72, "num_lines": 17, "path": "/scripts/external/silix-cluster-to-mash-cluster-table.py", "repo_name": "rplanel/species-clustering", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n\nimport sys\nimport re\n# from functools import reduce\n\ncluster_file = open(sys.argv[1], 'r')\nmash_param_id = sys.argv[2]\n\n\nfor line in cluster_file:\n    stripped_line = line.strip()\n    if stripped_line != '':\n        list_column = stripped_line.split(\"\\t\")\n        ## clean cluster id\n        cluster_id = list_column[0].replace(\"CL\",\"\")\n        print(mash_param_id + \"\\t\" + cluster_id + \"\\t\" + list_column[1])\n" }, { "alpha_fraction": 0.8301886916160583, "alphanum_fraction": 0.8301886916160583, "avg_line_length": 51.5, "blob_id": "6ff7d90a2c34526877b6a6d7ea4ac5065c82d598", "content_id": "e23106b8964dc4f7e7d0d3d9b87f3cee16583200", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 106, "license_type": "no_license", "max_line_length": 83, "num_lines": 2, "path": "/README.md", "repo_name": "rplanel/species-clustering", "src_encoding": "UTF-8", "text": "# species-clustering\nNextflow workflow that uses mash to cluster organisms and compare these clusterings.\n\n" }, { "alpha_fraction": 0.5846338272094727, "alphanum_fraction": 0.5942376852035522, "avg_line_length": 33.375, "blob_id": "201cfb542feb1a53af5eb31660cc9bca95e63896", "content_id": "412d4adbced7fa147c8a54e38d5992a05012bf6b", "detected_licenses": [], "is_generated": false, "is_vendor": true, "language": "Shell", "length_bytes": 833, "license_type": "no_license", "max_line_length": 210, "num_lines": 24, "path": "/scripts/external/extractProteome.sh", "repo_name": "rplanel/species-clustering", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\n\noidsFile=$1\n\n\n\n\nfor line in $(cat $oidsFile) \ndo \n    mysql --max_allowed-packet=1G -ABNqr pkgdb_ZR -e \\\n    \"SELECT strtofastaudf(CONCAT_WS('|',GO_id,O_id),gotoprotudf(S_string, GO_begin, GO_end, GO_frame,CONVERT(IF(GO_mutation='selenocysteine',124,IF(GO_mutation='pyrrolysine',125,R_genetic_code)),UNSIGNED) )) \n     FROM Genomic_Object G \n     INNER JOIN Sequence_String SS USING(S_id) \n     INNER JOIN Sequence S USING(S_id) \n     INNER JOIN Replicon R USING(R_id) \n     INNER JOIN Organism USING(O_id) \n     WHERE O_id = $line \n     AND GO_type IN('CDS','fCDS') \n     AND S.S_status = 'inProduction'\n     AND GO_evidence = 'automatic' ;\" | \\\n\tperl -ne 'if(/^>/){ chomp;($seqId, $oid) = split(/\\|/); open($O, \">>\", $oid.\".faa\"); print $O \"$seqId\\n\";} else {s/\\*//g;print $O $_}'\n\ndone\n\n\n\t \n" }, { "alpha_fraction": 0.3507002890110016, "alphanum_fraction": 0.35798320174217224, "avg_line_length": 29.775861740112305, "blob_id": "70f52c65e7d86123a9faec4bf3fbe374ba5733f4", "content_id": "14e0f7a7ca025e342df770f82e2da0240e22b3a7", "detected_licenses": [], "is_generated": false, "is_vendor": true, "language": "JavaScript", 
"length_bytes": 5355, "license_type": "no_license", "max_line_length": 85, "num_lines": 174, "path": "/scripts/external/visual_report/src/d3/clusterTree.js", "repo_name": "rplanel/species-clustering", "src_encoding": "UTF-8", "text": "function clusterTree () {\n function exports(_selection, width, height, rank) {\n\t_selection.each(function(data, i) {\n console.log(rank);\n console.log(data);\n \n var tree = d3.tree()\n .size([width, height]);\n var lineHeight = 25;\n var colors = d3.scaleOrdinal(d3.schemeCategory10); //schemeCategory20b\n var root = d3.hierarchy(data);\n tree(root);\n var leaves = root.leaves();\n\n\t console.log(root);\n\t \n d3\n .select('svg')\n\t .attr('width', width+200)\n .attr('height', leaves.length * lineHeight + 50);\n \n /**\n * Functions\n */\n function getlength (node) {\n if (node.data.length != null) {\n return parseFloat(node.data.length);\n }\n else {\n return 0;\n }\n }\n\n\n function addTreeHeight (node) {\n if (node.children == undefined ) {\n node.left = 0;\n node.right = 0;\n return 1;\n }\n else {\n node.children[0].isRight = true;\n node.right = addTreeHeight(node.children[0]);\n node.children[1].isRight = false;\n node.left = addTreeHeight(node.children[1]);\n return node.left + node.right + 1;\n }\n }\n\n function addYposition(node) {\n if (node.children == undefined ) {\n return node.x;\n }\n else {\n \n var tot = node.children.reduce(function(prev,cur){\n return prev + addYposition(cur);\n },0);\n node.x = tot / node.children.length;\n return node.x;\n }\n }\n \n \n \n root\n .descendants()\n .forEach(function(d){\n var totalL = getlength(d);\n var parent = d.parent;\n while (parent) {\n var l = getlength(parent);\n totalL += l;\n parent = parent.parent;\n }\n d.data.depth = totalL;\n });\n\n \n leaves.forEach(function(node,i){\n node.x = (i * lineHeight) + 10;\n });\n\n \n \n var maxDepth = leaves.reduce(function(prev,curr){\n var depth = curr.data.depth;\n return (prev < depth) ? depth : prev;\n },0);\n\n \n \n var x = d3.scaleLinear()\n .domain([0, maxDepth])\n .range([0, tree.size()[0]]);\n\n root\n .descendants()\n .forEach(function(d){\n d.y = x(d.data.depth);\n });\n\n \n addYposition(root);\n //addTreeHeight(root);\n\n \n var container = d3.select(this);\n var link = container.selectAll(\".link\")\n .data(root.descendants().slice(1))\n .enter()\n .append(\"path\")\n .attr(\"class\", \"link\")\n .attr(\"d\", function(d) {\n return \"M\" + d.y + \" \" + d.x\n + \" H\" + d.parent.y \n + \" V\" + d.parent.x;\n \n\n });\n\n\n var dataSelection = container\n .selectAll(\".node\")\n .data(root.descendants());\n \n var nodeEnter = dataSelection\n .enter();\n \n var node = nodeEnter\n .append(\"g\")\n .attr(\"class\", function(d) {\n return \"node\" + (d.children ? \" node--internal\" : \" node--leaf\");\n })\n .attr(\"transform\", function(d) {\n return \"translate(\" + d.y + \",\" + d.x + \")\";\n });\n\n node.append(\"circle\")\n .attr(\"r\", 2.5);\n \n node.append(\"text\")\n .attr(\"dy\", 3)\n .attr(\"x\", 8);\n\n\n var update = nodeEnter\n .merge(dataSelection)\n .selectAll(\"text\")\n .style(\"text-anchor\", function(d) {\n return d.children ? 
\"end\" : \"start\";\n })\n .style('fill',function(d){\n var c = '';\n if (d.data.taxon && d.data.taxon.taxonomy[rank]) {\n c = colors(d.data.taxon.taxonomy[rank].name);\n }\n\t\t return c;\n })\n .text(function(d) {\n var text;\n if (d.data.taxon && d.data.taxon.taxonomy[rank]) {\n text = d.data.taxon.taxonomy[rank].name;\n }\n else {\n \n text = '';\n }\n return text;\n });\n });\n } \n return exports;\n}\n" }, { "alpha_fraction": 0.6430302858352661, "alphanum_fraction": 0.6503030061721802, "avg_line_length": 21.53424644470215, "blob_id": "6ea3bc65f9af4cda4fea2a9b061fe22394abcea5", "content_id": "6a055ae06e19c318fb8f281474751f2dd92dc16d", "detected_licenses": [], "is_generated": false, "is_vendor": true, "language": "Python", "length_bytes": 1650, "license_type": "no_license", "max_line_length": 89, "num_lines": 73, "path": "/scripts/external/calculate-louvain-communities.py", "repo_name": "rplanel/species-clustering", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n\nimport argparse\nimport sys\nimport os.path\nimport community\nimport networkx as nx\nimport gzip\n\ndef zipped(filename):\n mode = 'rt'\n try:\n f = gzip.open(filename, mode)\n except IOError:\n raise argparse.ArgumentError('')\n return f\n\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\n \"-e\", \"--edges\",\n nargs='?',\n type = zipped,\n default=sys.stdin,\n help = \"File that contains the distances between each genomes (on distance per-line)\"\n)\n\nparser.add_argument(\n \"-o\", \"--output\",\n nargs='?',\n type = argparse.FileType('w'),\n help = \"File that contains the distances between each genomes (on distance per-line)\"\n)\n\nparser.add_argument(\n '-w', '--weight',\n help='Take into account the link weight',\n action='store_true'\n)\n\nargs = parser.parse_args()\nG = nx.Graph()\n\n\ndef add_edge(G, columns):\n G.add_edge(columns[0], columns[1])\n\ndef add_weighted_edge(G, columns):\n G.add_edge( columns[0], columns[1], weight=(1-float(columns[2])) )\n \n\n\ncustom_add_edge = add_weighted_edge if args.weight else add_edge;\n\n\nfor line in args.edges:\n line_tr = line.strip()\n columns = line_tr.split('\\t')\n #db_ids = [columns[0], columns[1]]\n custom_add_edge(G,columns)\n #G.add_edge(columns[0], columns[1])\n \n\n#first compute the best partition\n# partition = community.best_partition(G)\n\n# partition = community.best_partition(G, weight='weight')\n\ndendrogram = community.generate_dendrogram(G, weight='weight')\npartition = community.partition_at_level(dendrogram, 0)\n\nfor k, v in partition.items():\n args.output.write(str(v) + \"\\t\" + k + \"\\n\")\n \n" }, { "alpha_fraction": 0.5441684722900391, "alphanum_fraction": 0.5655454397201538, "avg_line_length": 25.94915199279785, "blob_id": "ea2d7227c1ab883cce3f12b4b5a0c74c87328786", "content_id": "7c68d800f5085b9688d84e60a52d8c307795548f", "detected_licenses": [], "is_generated": false, "is_vendor": true, "language": "Python", "length_bytes": 3181, "license_type": "no_license", "max_line_length": 97, "num_lines": 118, "path": "/scripts/external/get-diff-clusterging.py", "repo_name": "rplanel/species-clustering", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n\nimport argparse\nimport sys\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\n \"-c\", \"--clustering\",\n nargs='?',\n type = argparse.FileType('r'),\n default=sys.stdin,\n help = \"File that contains the distances between each genomes (on distance per-line)\"\n)\n\n# parser.add_argument(\n# \"-o\", \"--output\",\n# nargs='?',\n# type = 
argparse.FileType('w'),\n#     help = \"File that contains the distances between each genomes (on distance per-line)\"\n# )\n\n# parser.add_argument(\n#     '-w', '--weight',\n#     help='Take into account the link weight',\n#     action='store_true'\n# )\n\nargs = parser.parse_args()\n\ndef getKey(item):\n    return item[0]\n\ncluster_to_node1 = dict()\ncluster_to_node2 = dict()\n\nnode_to_cluster = dict()\n\nid_node = 0\n\nres_count = dict()\n\nfor line in args.clustering:\n    line_tr = line.strip()\n    columns = line_tr.split('\\t')\n    if columns[0] not in cluster_to_node1:\n        cluster_to_node1[columns[0]] = set()\n    \n    if columns[1] not in cluster_to_node2:\n        cluster_to_node2[columns[1]] = set()\n\n    cluster_to_node1[columns[0]].add(id_node)\n    cluster_to_node2[columns[1]].add(id_node)\n    anno = columns[2:]\n    node_to_cluster[id_node] = [columns[0], columns[1]] + anno\n    id_node += 1\n    \n    # print(cluster_to_node1)\n    # print(cluster_to_node2)\n\nfor clu_id1, clu1 in cluster_to_node1.items():\n    smallest_diff = [1, None, None, set()]\n    for clu_id2, clu2 in cluster_to_node2.items():\n        # print('======new compare')\n        # print(clu1)\n        # print(clu2)\n        union = clu1.union(clu2)\n        # print('union')\n        # print(union)\n        if len(union) == 0:\n            continue\n        else :\n            difference = clu1.symmetric_difference(clu2)\n            # print('diff')\n            # print(difference)\n            # float() keeps the ratio exact if this runs under python 2\n            ratio = float(len(difference)) / len(union)\n            # print(ratio)\n            if ratio < smallest_diff[0]:\n                smallest_diff[0] = ratio\n                smallest_diff[1] = clu_id1\n                smallest_diff[2] = clu_id2\n                smallest_diff[3] = difference\n\n        \n    res_count[clu_id1] = smallest_diff\n    \n\n\n# print(res_count)\nsummary = dict()\nfor clu_id, res in res_count.items():\n    for node in res[3]:\n        if node not in summary:\n            summary[node] = [1, node] + node_to_cluster[node]\n        else:\n            count = summary[node][0] + 1\n            summary[node][0] = count\n\nprint(\"\\t\".join(['count', 'node_id', 'clu_id_1','clus_id_2','rank_name']))\nfor summ in summary.values():\n\n    line = [ str(it) for it in summ]\n    print('\\t'.join(line))\n# for node_id in node_to_cluster.keys():\n#     cluster_in_1 = node_to_cluster[node_id][0]\n#     cluster_in_2 = node_to_cluster[node_id][1]\n#     max_diff = 0\n#     for diff_node in cluster_to_node1[cluster_in_1].difference(cluster_to_node2[cluster_in_2]):\n#         if node_id not in res_count:\n#             res_count[node_id] = [1,node_to_cluster[node_id][2]]\n#         else:\n#             res_count[node_id][0] += 1 \n    \n\n\n# res2 = sorted(res_count.values(), key=getKey,reverse=True)\n\n# for res in res2:\n#     print(res[1] + \"\\t\" + str(res[0]))\n\n" }, { "alpha_fraction": 0.5823019742965698, "alphanum_fraction": 0.5878713130950928, "avg_line_length": 22.43055534362793, "blob_id": "62230925e4bbe30c32029f470ae53e7292ffac0f", "content_id": "19c2e6055c32a06a85ce6fc317f7546117afdf03", "detected_licenses": [], "is_generated": false, "is_vendor": true, "language": "JavaScript", "length_bytes": 1616, "license_type": "no_license", "max_line_length": 70, "num_lines": 72, "path": "/scripts/external/calculate-nj-tree.js", "repo_name": "rplanel/species-clustering", "src_encoding": "UTF-8", "text": "var NJ = require('neighbor-joining');\nvar fs = require('fs');\nconst util = require('util');\n\nvar distanceFS;\ntry {\n    distanceFS = fs.readFileSync(process.argv[2], 'utf8');\n} catch (err) {\n    console.log('File \"'+process.argv[2]+'\" is size too big');\n    process.exit(0);\n}\n\n\n//console.log(process.argv[2]);\n\n\nvar taxa = JSON.parse(fs.readFileSync(process.argv[3], 'utf8'));\nvar distance = JSON.parse(distanceFS);\nvar out = process.argv[4];\n\n// console.log(taxa.length);\n// 
console.log(distance.length);\n\n\n// distance.forEach(function(line) {\n// \tconsole.log(line.length);\n// });\n\nvar distanceFloat = distance.map(function(col) {\n    return col.map(function(cell){\n        return parseFloat(cell);\n    });\n});\n\n\nvar famSize = taxa.length;\nif (famSize > 1) {\n    var new_taxa = taxa.map(function(obj,i){\n\t//console.log(obj);\n\tif (obj) {\n\t    var name = String(obj.name);\n\t    if (name) {\n\t\tvar new_name = name.replace(/\\:|\\(|\\)|\\;/g, \"_\");\n\t\tobj.name = new_name;\n\t    }\n\t\n\t    return obj;\n\t}\n\telse {\n\t    return {\n\t\tname: i\n\t    };\n\t}\n    });\n    \n    // console.log(new_taxa);\n    // console.log(distanceFloat);\n    // distanceFloat.forEach(function(line) {\n    // \tconsole.log(line.length);\n    // });\n    var RNJ = new NJ.RapidNeighborJoining(distanceFloat, new_taxa);\n    RNJ.run();\n    var treeObject = RNJ.getAsObject();\n    const treeNewick = RNJ.getAsNewick();\n    var json = JSON.stringify(treeObject);\n    fs.writeFile(out + '.json',json);\n    fs.writeFile(out + '.nwk',treeNewick);\n}\nelse {\n    fs.writeFile(out + '.json','{}');\n    fs.writeFile(out + '.nwk', ';');\n}\n\n" }, { "alpha_fraction": 0.6188373565673828, "alphanum_fraction": 0.6298748850822449, "avg_line_length": 23.26785659790039, "blob_id": "572acd55eb32a23286a918550b15b6238ff0c72a", "content_id": "7a54155f88dd86f955625b7f4af06a2f5b4d989c", "detected_licenses": [], "is_generated": false, "is_vendor": true, "language": "Python", "length_bytes": 1359, "license_type": "no_license", "max_line_length": 89, "num_lines": 56, "path": "/scripts/external/convert-to-infomap-input.py", "repo_name": "rplanel/species-clustering", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n\nimport argparse\nimport sys\nimport os.path\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\n    \"-e\", \"--edges\",\n    nargs='?',\n    type = argparse.FileType('r'),\n    default=sys.stdin,\n    help = \"File that contains the distances between genomes (one distance per line)\"\n)\n\nparser.add_argument(\n    \"-o\", \"--output\",\n    nargs='?',\n    type = argparse.FileType('w'),\n    help = \"Output file in Infomap link-list format\"\n)\n\nargs = parser.parse_args()\n\n\ncountNodes = 0\ncountLinks = 0\nnodesDico = dict()\nlinksSet = set()\n\nfor line in args.edges:\n    line_tr = line.strip()\n    columns = line_tr.split(\"\\t\")\n    nodeId1 = columns[0]\n    nodeId2 = columns[1]\n    nodes = [nodeId1, nodeId2]\n    linkId = str(nodeId1)+str(nodeId2)\n    nodeLine = []\n    # add nodes\n    for nodeId in nodes:\n        if nodeId not in nodesDico:\n            nodesDico[nodeId] = countNodes\n            countNodes += 1\n        nodeLine.append(str(nodesDico[nodeId]))\n    ## add the distance. 
1-distance to make it like a weight.\n nodeLine.append(str(1-float(columns[2])))\n if linkId not in linksSet:\n linksSet.add(linkId)\n args.output.write(\"\\t\".join(nodeLine)+\"\\n\")\n\n\ndicoFile = open('new-to-original-id.tsv','w')\n\n\nfor k, v in nodesDico.items():\n dicoFile.write(str(v)+\"\\t\"+str(k)+\"\\n\")\n" }, { "alpha_fraction": 0.6021560430526733, "alphanum_fraction": 0.6328427791595459, "avg_line_length": 38.66289520263672, "blob_id": "20b0784399468506507271c29508d7b85ffc818c", "content_id": "c09858752f1228d82d098f9d1245543eede6fbc7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 17532, "license_type": "no_license", "max_line_length": 203, "num_lines": 442, "path": "/launchTest.sh", "repo_name": "rplanel/species-clustering", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\n# -with-dag workflow.pdf\n\ndir=\"./out\"\nmkdir -p $dir\n\n\nkmer=21\nsketchSize=1000\n\noutDir=\"$dir/${kmer}-${sketchSize}\"\nmkdir -p $outDir\n\n\n\n# d=0.03\n# p=1e-10\n# nohup nextflow run mash-nextflow.nf -bg -profile cluster -w data/work -with-timeline -with-trace --sketchSize $sketchSize --kmerSize=$kmer --cpus 1 --pvalue $p --distance $d > \"$outDir/${p}-${d}.out\"\n# echo \"$sketchSize - $kmer - $d - $p\"\n# sleep 5\n\n\n# d=0.037\n# p=1e-10\n# # nohup nextflow run mash-nextflow.nf -bg -profile cluster -w data/work -with-timeline -with-trace --sketchSize $sketchSize --kmerSize=$kmer --cpus 1 --pvalue $p --distance $d > \"$outDir/${p}-${d}.out\"\n# jobify -c 1 nextflow run mash-nextflow.nf -w data/work -with-timeline -with-trace --sketchSize $sketchSize --kmerSize=$kmer --cpus 1 --pvalue $p --distance $d > \"$outDir/${p}-${d}.out\"\n# echo \"$sketchSize - $kmer - $d - $p\"\n# sleep 5\n\n\n# d=0.038\n# p=1e-10\n# # nohup nextflow run mash-nextflow.nf -bg -profile cluster -w data/work -with-timeline -with-trace --sketchSize $sketchSize --kmerSize=$kmer --cpus 1 --pvalue $p --distance $d > \"$outDir/${p}-${d}.out\"\n# jobify -c 1 nextflow run mash-nextflow.nf -w data/work -with-timeline -with-trace --sketchSize $sketchSize --kmerSize=$kmer --cpus 1 --pvalue $p --distance $d > \"$outDir/${p}-${d}.out\"\n# echo \"$sketchSize - $kmer - $d - $p\"\n# sleep 5\n\n# d=0.039\n# p=1e-10\n# # nohup nextflow run mash-nextflow.nf -bg -profile cluster -w data/work -with-timeline -with-trace --sketchSize $sketchSize --kmerSize=$kmer --cpus 1 --pvalue $p --distance $d > \"$outDir/${p}-${d}.out\"\n# jobify -c 1 nextflow run mash-nextflow.nf -w data/work -with-timeline -with-trace --sketchSize $sketchSize --kmerSize=$kmer --cpus 1 --pvalue $p --distance $d > \"$outDir/${p}-${d}.out\"\n# echo \"$sketchSize - $kmer - $d - $p\"\n# sleep 5\n\n\n\n# d=0.04\n# p=1e-10\n# nohup nextflow run mash-nextflow.nf -bg -profile cluster -w data/work -with-timeline -with-trace --sketchSize $sketchSize --kmerSize=$kmer --cpus 1 --pvalue $p --distance $d > \"$outDir/${p}-${d}.out\"\n# echo \"$sketchSize - $kmer - $d - $p\"\n# sleep 5\n\n\n\n# d=0.05\n# p=1e-10\n# jobify -c 8 nextflow run mash-nextflow.nf -w data/work -with-timeline -with-trace --sketchSize $sketchSize --kmerSize=$kmer --cpus 1 --pvalue $p --distance $d > \"$outDir/${p}-${d}.out\"\n# # nohup nextflow run mash-nextflow.nf -bg -profile cluster -w data/work -with-timeline -with-trace --sketchSize $sketchSize --kmerSize=$kmer --cpus 1 --pvalue $p --distance $d > \"$outDir/${p}-${d}.out\"\n# echo \"$sketchSize - $kmer - $d - $p\"\n# sleep 5\n\n# d=0.06\n# p=1e-10\n# nohup nextflow run mash-nextflow.nf -bg -profile 
cluster -w data/work -with-timeline -with-trace --sketchSize $sketchSize --kmerSize=$kmer --cpus 1 --pvalue $p --distance $d > \"$outDir/${p}-${d}.out\"\n# echo \"$sketchSize - $kmer - $d - $p\"\n# sleep 5\n\n\n# d=0.065\n# p=1e-10\n# nohup nextflow run mash-nextflow.nf -bg -profile cluster -w data/work -with-timeline -with-trace --sketchSize $sketchSize --kmerSize=$kmer --cpus 1 --pvalue $p --distance $d > \"$outDir/${p}-${d}.out\"\n# echo \"$sketchSize - $kmer - $d - $p\"\n# sleep 5\n\n\n# d=0.07\n# p=1e-10\n# nohup nextflow run mash-nextflow.nf -bg -profile cluster -w data/work -with-timeline -with-trace --sketchSize $sketchSize --kmerSize=$kmer --cpus 1 --pvalue $p --distance $d > \"$outDir/${p}-${d}.out\"\n# echo \"$sketchSize - $kmer - $d - $p\"\n# sleep 5\n\nd=0.075\np=1e-10\n#nohup nextflow run mash-nextflow.nf -bg -profile cluster -w data/work -with-timeline -with-trace --sketchSize $sketchSize --kmerSize=$kmer --cpus 1 --pvalue $p --distance $d > \"$outDir/${p}-${d}.out\"\njobify -c 8 nextflow run mash-nextflow.nf -w data/work -with-timeline -with-trace --sketchSize $sketchSize --kmerSize=$kmer --cpus 8 --pvalue $p --distance $d > \"$outDir/${p}-${d}.out\"\necho \"$sketchSize - $kmer - $d - $p\"\nsleep 5\n\n\n# d=0.08\n# p=1e-10\n# nohup nextflow run mash-nextflow.nf -bg -profile cluster -w data/work -with-timeline -with-trace --sketchSize $sketchSize --kmerSize=$kmer --cpus 1 --pvalue $p --distance $d > \"$outDir/${p}-${d}.out\"\n# echo \"$sketchSize - $kmer - $d - $p\"\n# sleep 5\n\n# d=0.09\n# p=1e-10\n# nohup nextflow run mash-nextflow.nf -bg -profile cluster -w data/work -with-timeline -with-trace --sketchSize $sketchSize --kmerSize=$kmer --cpus 1 --pvalue $p --distance $d > \"$outDir/${p}-${d}.out\"\n# echo \"$sketchSize - $kmer - $d - $p\"\n# sleep 5\n\n# d=0.1\n# p=1e-10\n# nohup nextflow run mash-nextflow.nf -bg -profile cluster -w data/work -with-timeline -with-trace --sketchSize $sketchSize --kmerSize=$kmer --cpus 1 --pvalue $p --distance $d > \"$outDir/${p}-${d}.out\"\n# echo \"$sketchSize - $kmer - $d - $p\"\n# sleep 5\n\n# d=0.11\n# p=1e-10\n# nohup nextflow run mash-nextflow.nf -bg -profile cluster -w data/work -with-timeline -with-trace --sketchSize $sketchSize --kmerSize=$kmer --cpus 1 --pvalue $p --distance $d > \"$outDir/${p}-${d}.out\"\n# echo \"$sketchSize - $kmer - $d - $p\"\n# sleep 5\n\n\n\n# d=0.12\n# p=1e-10\n# nohup nextflow run mash-nextflow.nf -bg -profile cluster -w data/work -with-timeline -with-trace --sketchSize $sketchSize --kmerSize=$kmer --cpus 1 --pvalue $p --distance $d > \"$outDir/${p}-${d}.out\"\n# echo \"$sketchSize - $kmer - $d - $p\"\n# sleep 5\n\n\n# d=0.125\n# p=1e-10\n# nohup nextflow run mash-nextflow.nf -bg -profile cluster -w data/work -with-timeline -with-trace --sketchSize $sketchSize --kmerSize=$kmer --cpus 1 --pvalue $p --distance $d > \"$outDir/${p}-${d}.out\"\n# echo \"$sketchSize - $kmer - $d - $p\"\n# sleep 5\n\n\n\n# d=0.13\n# p=1e-10\n# nohup nextflow run mash-nextflow.nf -bg -profile cluster -w data/work -with-timeline -with-trace --sketchSize $sketchSize --kmerSize=$kmer --cpus 1 --pvalue $p --distance $d > \"$outDir/${p}-${d}.out\"\n# echo \"$sketchSize - $kmer - $d - $p\"\n# sleep 5\n\n# d=0.14\n# p=1e-10\n# nohup nextflow run mash-nextflow.nf -bg -profile cluster -w data/work -with-timeline -with-trace --sketchSize $sketchSize --kmerSize=$kmer --cpus 1 --pvalue $p --distance $d > \"$outDir/${p}-${d}.out\"\n# echo \"$sketchSize - $kmer - $d - $p\"\n# sleep 5\n\n# d=0.15\n# p=1e-10\n# nohup nextflow run 
mash-nextflow.nf -bg -profile cluster -w data/work -with-timeline -with-trace --sketchSize $sketchSize --kmerSize=$kmer --cpus 1 --pvalue $p --distance $d > \"$outDir/${p}-${d}.out\"\n# echo \"$sketchSize - $kmer - $d - $p\"\n# sleep 5\n\n# d=0.16\n# p=1e-10\n# nohup nextflow run mash-nextflow.nf -bg -profile cluster -w data/work -with-timeline -with-trace --sketchSize $sketchSize --kmerSize=$kmer --cpus 1 --pvalue $p --distance $d > \"$outDir/${p}-${d}.out\"\n# echo \"$sketchSize - $kmer - $d - $p\"\n# sleep 5\n\n# d=0.17\n# p=1e-10\n# nohup nextflow run mash-nextflow.nf -bg -profile cluster -w data/work -with-timeline -with-trace --sketchSize $sketchSize --kmerSize=$kmer --cpus 1 --pvalue $p --distance $d > \"$outDir/${p}-${d}.out\"\n# echo \"$sketchSize - $kmer - $d - $p\"\n# sleep 5\n\n\n\n\n#######################################################################\nkmer=21\nsketchSize=5000\noutDir=\"$dir/${kmer}-${sketchSize}\"\nmkdir -p $outDir\n\n\n# d=0.08\n# p=1e-10\n# nohup nextflow run mash-nextflow.nf -bg -profile cluster -w data/work -with-timeline -with-trace --sketchSize $sketchSize --kmerSize=$kmer --cpus 1 --pvalue $p --distance $d > \"$outDir/${p}-${d}.out\"\n# echo \"$sketchSize - $kmer - $d - $p\"\n# sleep 5\n\n\n# d=0.10\n# p=1e-10\n# nohup nextflow run mash-nextflow.nf -bg -profile cluster -w data/work -with-timeline -with-trace --sketchSize $sketchSize --kmerSize=$kmer --cpus 1 --pvalue $p --distance $d > \"$outDir/${p}-${d}.out\"\n# echo \"$sketchSize - $kmer - $d - $p\"\n# sleep 5\n\n\n\n########################################################\n## s=400 k=16\n\n\nkmer=16\nsketchSize=400\noutDir=\"$dir/${kmer}-${sketchSize}\"\nmkdir -p $outDir\n\n# d=0.03\n# p=1e-10\n# nohup nextflow run mash-nextflow.nf -bg -profile cluster -w data/work -with-timeline -with-trace --sketchSize $sketchSize --kmerSize=$kmer --cpus 1 --pvalue $p --distance $d > \"$outDir/${p}-${d}.out\"\n# echo \"$sketchSize - $kmer - $d - $p\"\n# sleep 5\n\n# d=0.035\n# p=1e-10\n# nohup nextflow run mash-nextflow.nf -bg -profile cluster -w data/work -with-timeline -with-trace --sketchSize $sketchSize --kmerSize=$kmer --cpus 1 --pvalue $p --distance $d > \"$outDir/${p}-${d}.out\"\n# echo \"$sketchSize - $kmer - $d - $p\"\n# sleep 5\n\n# d=0.037\n# p=1e-10\n# jobify -c 8 nextflow run mash-nextflow.nf -w data/work -with-timeline -with-trace --sketchSize $sketchSize --kmerSize=$kmer --cpus 1 --pvalue $p --distance $d > \"$outDir/${p}-${d}.out\"\n# # nohup nextflow run mash-nextflow.nf -bg -profile cluster -w data/work -with-timeline -with-trace --sketchSize $sketchSize --kmerSize=$kmer --cpus 1 --pvalue $p --distance $d > \"$outDir/${p}-${d}.out\"\n# echo \"$sketchSize - $kmer - $d - $p\"\n# sleep 5\n\n\n# d=0.04\n# p=1e-10\n# nohup nextflow run mash-nextflow.nf -bg -profile cluster -w data/work -with-timeline -with-trace --sketchSize $sketchSize --kmerSize=$kmer --cpus 1 --pvalue $p --distance $d > \"$outDir/${p}-${d}.out\"\n# echo \"$sketchSize - $kmer - $d - $p\"\n# sleep 5\n\n\n# d=0.05\n# p=1e-10\n# # nohup nextflow run mash-nextflow.nf -bg -profile cluster -w data/work -with-timeline -with-trace --sketchSize $sketchSize --kmerSize=$kmer --cpus 1 --pvalue $p --distance $d > \"$outDir/${p}-${d}.out\"\n# jobify -c 8 nextflow run mash-nextflow.nf -w data/work -with-timeline -with-trace --sketchSize $sketchSize --kmerSize=$kmer --cpus 1 --pvalue $p --distance $d > \"$outDir/${p}-${d}.out\"\n# echo \"$sketchSize - $kmer - $d - $p\"\n# sleep 5\n\n# d=0.06\n# p=1e-10\n# nohup nextflow run mash-nextflow.nf -bg 
-profile cluster -w data/work -with-timeline -with-trace --sketchSize $sketchSize --kmerSize=$kmer --cpus 1 --pvalue $p --distance $d > \"$outDir/${p}-${d}.out\"\n# echo \"$sketchSize - $kmer - $d - $p\"\n# sleep 5\n\n\n# d=0.07\n# p=1e-10\n# nohup nextflow run mash-nextflow.nf -bg -profile cluster -w data/work -with-timeline -with-trace --sketchSize $sketchSize --kmerSize=$kmer --cpus 1 --pvalue $p --distance $d > \"$outDir/${p}-${d}.out\"\n# echo \"$sketchSize - $kmer - $d - $p\"\n# sleep 5\n\n# d=0.08\n# p=1e-10\n# nohup nextflow run mash-nextflow.nf -bg -profile cluster -w data/work -with-timeline -with-trace --sketchSize $sketchSize --kmerSize=$kmer --cpus 1 --pvalue $p --distance $d > \"$outDir/${p}-${d}.out\"\n# echo \"$sketchSize - $kmer - $d - $p\"\n# sleep 5\n\n# d=0.09\n# p=1e-10\n# nohup nextflow run mash-nextflow.nf -bg -profile cluster -w data/work -with-timeline -with-trace --sketchSize $sketchSize --kmerSize=$kmer --cpus 1 --pvalue $p --distance $d > \"$outDir/${p}-${d}.out\"\n# echo \"$sketchSize - $kmer - $d - $p\"\n# sleep 5\n\n# d=0.1\n# p=1e-10\n# nohup nextflow run mash-nextflow.nf -bg -profile cluster -w data/work -with-timeline -with-trace --sketchSize $sketchSize --kmerSize=$kmer --cpus 1 --pvalue $p --distance $d > \"$outDir/${p}-${d}.out\"\n# echo \"$sketchSize - $kmer - $d - $p\"\n# sleep 5\n\n# d=0.11\n# p=1e-10\n# nohup nextflow run mash-nextflow.nf -bg -profile cluster -w data/work -with-timeline -with-trace --sketchSize $sketchSize --kmerSize=$kmer --cpus 1 --pvalue $p --distance $d > \"$outDir/${p}-${d}.out\"\n# echo \"$sketchSize - $kmer - $d - $p\"\n# sleep 5\n\n# d=0.12\n# p=1e-10\n# nohup nextflow run mash-nextflow.nf -bg -profile cluster -w data/work -with-timeline -with-trace --sketchSize $sketchSize --kmerSize=$kmer --cpus 1 --pvalue $p --distance $d > \"$outDir/${p}-${d}.out\"\n# echo \"$sketchSize - $kmer - $d - $p\"\n# sleep 5\n\n# d=0.13\n# p=1e-10\n# nohup nextflow run mash-nextflow.nf -bg -profile cluster -w data/work -with-timeline -with-trace --sketchSize $sketchSize --kmerSize=$kmer --cpus 1 --pvalue $p --distance $d > \"$outDir/${p}-${d}.out\"\n# echo \"$sketchSize - $kmer - $d - $p\"\n# sleep 5\n\n# d=0.14\n# p=1e-10\n# nohup nextflow run mash-nextflow.nf -bg -profile cluster -w data/work -with-timeline -with-trace --sketchSize $sketchSize --kmerSize=$kmer --cpus 1 --pvalue $p --distance $d > \"$outDir/${p}-${d}.out\"\n# echo \"$sketchSize - $kmer - $d - $p\"\n# sleep 5\n\n\n# d=0.15\n# p=1e-10\n# nohup nextflow run mash-nextflow.nf -bg -profile cluster -w data/work -with-timeline -with-trace --sketchSize $sketchSize --kmerSize=$kmer --cpus 1 --pvalue $p --distance $d > \"$outDir/${p}-${d}.out\"\n# echo \"$sketchSize - $kmer - $d - $p\"\n# sleep 5\n\n\n# d=0.1\n# p=1e-100\n# nohup nextflow run mash-nextflow.nf -bg -profile cluster -w data/work -with-timeline -with-trace --sketchSize $sketchSize --kmerSize=$kmer --cpus 1 --pvalue $p --distance $d > \"$outDir/${p}-${d}.out\"\n# echo \"$sketchSize - $kmer - $d - $p\"\n# sleep 5\n\n\n# d=0.15\n# p=1e-100\n# nohup nextflow run mash-nextflow.nf -bg -profile cluster -w data/work -with-timeline -with-trace --sketchSize $sketchSize --kmerSize=$kmer --cpus 1 --pvalue $p --distance $d > \"$outDir/${p}-${d}.out\"\n# echo \"$sketchSize - $kmer - $d - $p\"\n# sleep 5\n\n\n\n\n# d=0.2\n# p=1e-100\n# nohup nextflow run mash-nextflow.nf -bg -profile cluster -w data/work -with-timeline -with-trace --sketchSize $sketchSize --kmerSize=$kmer --cpus 1 --pvalue $p --distance $d > \"$outDir/${p}-${d}.out\"\n# 
echo \"$sketchSize - $kmer - $d - $p\"\n# sleep 5\n\n\n# nohup nextflow run mash-nextflow.nf -bg -profile cluster -w data/work -with-timeline -with-trace --sketchSize $sketchSize --kmerSize=$kmer --cpus 1 --pvalue 1 --distance 0.03 > $outDir/1-0.03.out\n# sleep 5\n\n# nohup nextflow run mash-nextflow.nf -bg -profile cluster -w data/work -with-timeline -with-trace --sketchSize $sketchSize --kmerSize=$kmer --cpus 1 --pvalue 1 --distance 0.07 > $outDir/1-0.07.out\n# sleep 5\n\n# nohup nextflow run mash-nextflow.nf -bg -profile cluster -w data/work -with-timeline -with-trace --sketchSize $sketchSize --kmerSize=$kmer --cpus 1 --pvalue 1 --distance 0.1 > $outDir/1-0.1.out\n# sleep 5\n\n# nohup nextflow run mash-nextflow.nf -bg -profile cluster -w data/work -with-timeline -with-trace --sketchSize $sketchSize --kmerSize=$kmer --cpus 1 --pvalue 1 --distance 0.3 > $outDir/1-0.3.out\n# sleep 5\n\n#######################################################################\n# kmer=16\n# sketchSize=1000\n# outDir=\"$dir/${kmer}-${sketchSize}\"\n# mkdir -p $outDir\n\n\n# d=0.08\n# p=1e-10\n# nohup nextflow run mash-nextflow.nf -bg -profile cluster -w data/work -with-timeline -with-trace --sketchSize $sketchSize --kmerSize=$kmer --cpus 1 --pvalue $p --distance $d > \"$outDir/${p}-${d}.out\"\n# echo \"$sketchSize - $kmer - $d - $p\"\n# sleep 5\n\n\n\n\n#######################################################################\nkmer=16\nsketchSize=5000\noutDir=\"$dir/${kmer}-${sketchSize}\"\nmkdir -p $outDir\n\n\n# d=0.08\n# p=1e-10\n# nohup nextflow run mash-nextflow.nf -bg -profile cluster -w data/work -with-timeline -with-trace --sketchSize $sketchSize --kmerSize=$kmer --cpus 1 --pvalue $p --distance $d > \"$outDir/${p}-${d}.out\"\n# echo \"$sketchSize - $kmer - $d - $p\"\n# sleep 5\n\n\n# d=0.10\n# p=1e-10\n# nohup nextflow run mash-nextflow.nf -bg -profile cluster -w data/work -with-timeline -with-trace --sketchSize $sketchSize --kmerSize=$kmer --cpus 1 --pvalue $p --distance $d > \"$outDir/${p}-${d}.out\"\n# echo \"$sketchSize - $kmer - $d - $p\"\n# sleep 5\n\n\n# d=0.12\n# p=1e-10\n# nohup nextflow run mash-nextflow.nf -bg -profile cluster -w data/work -with-timeline -with-trace --sketchSize $sketchSize --kmerSize=$kmer --cpus 1 --pvalue $p --distance $d > \"$outDir/${p}-${d}.out\"\n# echo \"$sketchSize - $kmer - $d - $p\"\n# sleep 5\n\n\n\n\n#############################################################\n## s=5000 k=21\nkmer=21\nsketchSize=5000\noutDir=\"$dir/${kmer}-${sketchSize}\"\nmkdir -p $outDir\n\n# d=0.08\n# p=1e-10\n# nohup nextflow run mash-nextflow.nf -bg -profile cluster -w data/work -with-timeline -with-trace --sketchSize $sketchSize --kmerSize=$kmer --cpus 1 --pvalue $p --distance $d > \"$outDir/${p}-${d}.out\"\n# echo \"$sketchSize - $kmer - $d - $p\"\n# sleep 5\n\n\n\n# d=0.2\n# p=1e-100\n# nohup nextflow run mash-nextflow.nf -bg -profile cluster -w data/work -with-timeline -with-trace --sketchSize $sketchSize --kmerSize=$kmer --cpus 1 --pvalue $p --distance $d > \"$outDir/${p}-${d}.out\"\n# echo \"$sketchSize - $kmer - $d - $p\"\n# sleep 5\n\n\n# ##################################################################\n# ## s=5000 k=27\n# kmer=27\n# sketchSize=5000\n# outDir=\"$dir/${kmer}-${sketchSize}\"\n# mkdir -p $outDir\n\n\n# d=0.3\n# p=1e-100\n# nohup nextflow run mash-nextflow.nf -bg -profile cluster -w data/work -with-timeline -with-trace --sketchSize $sketchSize --kmerSize=$kmer --cpus 1 --pvalue $p --distance $d > \"$outDir/${p}-${d}.out\"\n# echo \"$sketchSize - $kmer - $d - $p\"\n# 
sleep 5\n\n\n# d=0.2\n# p=1e-100\n# nohup nextflow run mash-nextflow.nf -bg -profile cluster -w data/work -with-timeline -with-trace --sketchSize $sketchSize --kmerSize=$kmer --cpus 1 --pvalue $p --distance $d > \"$outDir/${p}-${d}.out\"\n# echo \"$sketchSize - $kmer - $d - $p\"\n# sleep 5\n\n\n# #####################################################################\n# kmer=21\n# sketchSize=10000\n# outDir=\"$dir/${kmer}-${sketchSize}\"\n# mkdir -p $outDir\n\n\n# d=0.1\n# p=1e-100\n# nohup nextflow run mash-nextflow.nf -bg -profile cluster -w data/work -with-timeline -with-trace --sketchSize $sketchSize --kmerSize=$kmer --cpus 1 --pvalue $p --distance $d > \"$outDir/${p}-${d}.out\"\n# echo \"$sketchSize - $kmer - $d - $p\"\n# sleep 5\n\n\n# d=0.2\n# p=1e-100\n# nohup nextflow run mash-nextflow.nf -bg -profile cluster -w data/work -with-timeline -with-trace --sketchSize $sketchSize --kmerSize=$kmer --cpus 1 --pvalue $p --distance $d > \"$outDir/${p}-${d}.out\"\n# echo \"$sketchSize - $kmer - $d - $p\"\n# sleep 5\n\n# d=0.3\n# p=1e-100\n# nohup nextflow run mash-nextflow.nf -bg -profile cluster -w data/work -with-timeline -with-trace --sketchSize $sketchSize --kmerSize=$kmer --cpus 1 --pvalue $p --distance $d > \"$outDir/${p}-${d}.out\"\n# echo \"$sketchSize - $kmer - $d - $p\"\n# sleep 5\n\n" }, { "alpha_fraction": 0.6135371327400208, "alphanum_fraction": 0.624454140663147, "avg_line_length": 29.53333282470703, "blob_id": "e07c9aacb9e180e7c17cb7f986f5bc331d4052ad", "content_id": "15da3a00f8c02a9d0e2ff664a50f47a6f278fd8b", "detected_licenses": [], "is_generated": false, "is_vendor": true, "language": "R", "length_bytes": 458, "license_type": "no_license", "max_line_length": 77, "num_lines": 15, "path": "/scripts/external/getSJIndex.R", "repo_name": "rplanel/species-clustering", "src_encoding": "UTF-8", "text": "require(igraph)\nparams = commandArgs(trailingOnly=TRUE)\nargs <- strsplit(params, \" \")\nfile <- args[[1]]\ndistance <- args[[2]]\ntaxa <- args[[3]]\n\nclusters <- read.csv(file, sep = \"\\t\")\nres <- split_join_distance(as.vector(clusters[[1]]),as.vector(clusters[[2]]))\nresString <- paste(res, collapse=\" \")\nline <- paste(distance, resString, sep=\" \")\n\nfileOut <- paste(\"./split-join-\", taxa,\".csv\", sep=\"\")\nprint(line)\nwrite(line, fileOut, sep = \"\\n\")\n" }, { "alpha_fraction": 0.6969696879386902, "alphanum_fraction": 0.7070707082748413, "avg_line_length": 21.5, "blob_id": "fc4276cb3a609540ffc8da0a8230b8c7bbceb239", "content_id": "83b1b9f9f6a2f1ddb127ef8c35eccf3d8f624564", "detected_licenses": [], "is_generated": false, "is_vendor": true, "language": "Shell", "length_bytes": 495, "license_type": "no_license", "max_line_length": 51, "num_lines": 22, "path": "/scripts/external/getOidsNa.sh", "repo_name": "rplanel/species-clustering", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\n\nmysql --max_allowed-packet=1G -ABNqr pkgdb_ZR -e \\\n\"\nSELECT DISTINCT O_id\nFROM Organism \nINNER JOIN Replicon USING(O_id) \nINNER JOIN Sequence USING(R_id) \nINNER JOIN Sequence_String USING(S_id) \nLEFT JOIN Contig USING(S_id) \nWHERE S_status = 'inProduction' \nAND S_id NOT IN (3142) \nAND O_id \nIN \n(SELECT O_id \nFROM Organism \nINNER JOIN Replicon USING(O_id) \nINNER JOIN Sequence USING(R_id) \nWHERE R_type IN ('chromosome','WGS') \nAND S_status = 'inProduction' GROUP BY O_id);\n\"\n" }, { "alpha_fraction": 0.5943613052368164, "alphanum_fraction": 0.6340621113777161, "avg_line_length": 30.600000381469727, "blob_id": "c293a9ef8ebee1b3e62ad2cf167c218f4a27cd6d", "content_id": 
"dba6f099bd523b3dc63721ceb10695660472135e", "detected_licenses": [], "is_generated": false, "is_vendor": true, "language": "Python", "length_bytes": 1738, "license_type": "no_license", "max_line_length": 231, "num_lines": 55, "path": "/scripts/external/extract-submatrix-on-taxo.py", "repo_name": "rplanel/species-clustering", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n\nimport sys\nimport re\n\n##.*|Escherichia coli 2864350\n\ndico_annotation = set();\ndir_out = r'/home/rplanel/test/vibrio-coli'\npattern_taxo = re.compile(\".*(\\t+Vibrio|\\t*Escherichia coli 2864350\\t*).*\")\n\nannotations_file = open(sys.argv[1], 'r')\ndistance_matrix_file = open(sys.argv[2], 'r')\nfiltered_distance_matrix = open(dir_out+'/out.txt', 'w')\nfiltered_annotation = open(dir_out+'/out-table.tsv', 'w')\n\n#7 633.PRJNA243530 Yersinia pseudotuberculosis 633 Yersinia pseudotuberculosis 629 Yersinia 1903411 Yersiniaceae 91347 Enterobacterales 1236 Gammaproteobacteria 1224 Proteobacteria\n\n\nfiltered_distance_matrix.write(\"node1\\tnode2\\tdistance\\tevalue\\tscore\\n\")\nfiltered_annotation.write(\"node_id\\tstrain_name\\tspecies_taxid\\tspecies\\tgenus_taxid\\tgenus\\tfamily_taxid\\tfamily\\torder_taxid\\torder\\tclass_taxid\\tclass\\tphylum_taxid\\tphylum\\n\")\n\n\n\n\n\nfor l in annotations_file:\n list_line = l.split(\"\\t\", 2)\n \n if pattern_taxo.match(list_line[2]):\n taxid = list_line[1]##.split(\".\")[0]\n # print(taxid)\n dico_annotation.add(taxid)\n ##print(list_line[2])\n filtered_annotation.write(taxid + \"\\t\" + list_line[2])\n\nfiltered_annotation.close()\n \nprint(\"Filtered the distance matrix\")\n \nfor l in distance_matrix_file:\n list_line = l.split(\"\\t\")\n taxids = {\n list_line[0],#.split(\".\")[0],\n list_line[1]#.split(\".\")[0]\n }\n distance = float(list_line[2])\n # print(taxids)\n \n if len(taxids.intersection(dico_annotation)) == 2 and distance < 0.5:\n # print(taxids)\n filtered_distance_matrix.write(l)\n \n\nfiltered_distance_matrix.close()\n" }, { "alpha_fraction": 0.4818219840526581, "alphanum_fraction": 0.49394065141677856, "avg_line_length": 22.120773315429688, "blob_id": "ec70fafc75d1a9d18ad4e662edb744d470444801", "content_id": "c9cf05e0ee57a13ea621f447c25af9151f11f330", "detected_licenses": [], "is_generated": false, "is_vendor": true, "language": "JavaScript", "length_bytes": 4786, "license_type": "no_license", "max_line_length": 84, "num_lines": 207, "path": "/scripts/external/visual_report/src/d3/piechart.js", "repo_name": "rplanel/species-clustering", "src_encoding": "UTF-8", "text": "function piechart () {\n var limit = 20;\n var getAngle = function (d) {\n\treturn (180 / Math.PI * (d.startAngle + d.endAngle) / 2 - 90);\n };\n function exports(_selection, width, height, numCol, url) {\n\t_selection.each(function(d, i) {\n\n\t var context = d3.select(this).select('g.piecharts');\n\t \n\t if (context.empty()){\n\t\tcontext = d3.select(this).append('g').classed('piecharts',true);\n\t }\n\n\t var categories = d.map(function(el){\n\t\treturn el.name;\n\t });\n\t \n\t var numOrganism;\n\t context.attr('transform','translate('+ (width+100) + ',' + height + ')');\n\t var radius = Math.min(width, height) / 2;\n\t var colors = d3.scaleOrdinal(d3.schemeCategory20b);\n\t var arc = d3.arc()\n\t\t.outerRadius(radius - 10)\n\t\t.innerRadius(0);\n\t \n\t var labelArc = d3.arc()\n\t\t.outerRadius(radius +2)\n\t\t.innerRadius(radius +2);\n\t \n\t var pie = d3.pie()\n\t\t.sort(null)\n\t\t.value(function(d) { return d.count; });\n\n\t var updateSelection = 
context\n\t\t.selectAll('g.pieDraw')\n\t\t.data(d,function(d){return d.id;});\n\t \n\t \n\t // ENTER\n\t var enterSelection = updateSelection\n\t\t.enter()\n\t\t.append('g')\n\t\t.classed('pieDraw',true);\n\t \n\n \n var aEnter = enterSelection\n .append('a')\n .attr('transform', 'translate(0,-100)');\n\t \n\t aEnter\n .append('text')\n .classed('title',true);\n\t \n\t aEnter\n .append('title');\n\t \n\t \n\n\n \n\t var pieE = enterSelection\n\t\t.append('g')\n\t \t.classed('pie',true);\n\t \n\t \n\t var update = enterSelection \n\t\t.merge(updateSelection);\n\n var a = update\n .select('a')\n .attr('href',function(d){\n return url + 'tree.html?' +d.name;\n });\n\t a\n .select('text.title')\n .style('font-size','14px')\n .style('text-decoration','underline')\n .text(function(d){\n\t\t var countTotal = d3.sum(d.data.map(function(el){\n\t\t\treturn el.count;\n\t\t }));\n\t\t return d.name+\"(\"+d.id+\") [\"+countTotal+\"]\";\n\n\t\t});\n\t a\n\t\t.select('title')\n\t\t.text(function(d){\n\t\t return d.data.map(function(obj){\n\t\t\treturn obj.name + ' (' + obj.count + ')';\n\t\t }).join(' | ');\n\t\t});\n \n\t update.attr('transform',function(d,i){\n var column = i % numCol;\n var line = parseInt(i / numCol);\n \n\t return 'translate(' + (column * 250 ) + ', ' + ((height+200) * line) + ')';\n\t });\n\t \n\t var pies = update\n\t\t.select('g.pie')\n\t \t.attr('transform',function(d,i) {\n var count = d.data.length;\n\t\t var rotate = 0;\n\t\t switch (count) {\n\t\t case 1 :\n\t\t rotate = -90;\n\t\t break;\n\t\t case 2 :\n\t\t rotate = 0;\n\t\t break;\n\t\t }\n\t\t \n\t\t return 'translate('+width/2+',0)'+\n 'rotate('+ rotate + ')';\n\t\t});\n\t \n\t // update\n\t // \t.select('text')\n\t // \t.text(function(d){return d.name})\n\t // \t.attr('transform','translate(0, '+ (-height/2)+')');\n\t \n\t \n\t // EXIT\n\t updateSelection.exit().remove();\n\t \n\t var arcSelection = pies\n\t\t.selectAll('g.arc')\n\t\t.data(function(d){\n\t\t return pie(d.data);\n\t\t}, function(d) {return d.data.id});\n\t \n\t \n\t var arcEnter = arcSelection\n\t\t.enter()\n\t\t.append('g')\n\t\t.classed('arc',true);\n\n\t arcEnter\n\t \t.append('title');\n\t \n\t arcEnter\n\t\t.append('path')\n\t\t.attr('d',arc);\n\t \n\t arcEnter\n\t \t.append('text');\n\n\t \n\t \n\t // Update\n\t var arcUpdate = arcEnter\n\t\t.merge(arcSelection);\n\t \n\t arcUpdate\n\t\t.select('path')\n\t \t.style('fill',function(d){\n\t\t var c = colors(d.data.id);\n\t\t return c;\n\t\t});\n\t \n\t arcUpdate\n\t\t.select('text')\n\t \t.attr(\"transform\", function(d) {\n\t\t var angle = getAngle(d);\n\t\t var transform = \"translate(\" + labelArc.centroid(d) + \")\"+\n\t\t\t\"rotate(\" + getAngle(d) + \")\";\n\t\t\t\n\t\t if (angle > 90 && d.data.name) {\n\t\t\txcenter = (parseInt(substringName(d.data.name).length) * 6)/2;\n\t\t\ttransform += 'rotate(180, '+xcenter+',0)';\n\t\t }\n\t\t return transform;\n\t\t})\n\t\t.text(function(d){\n\t\t return substringName(d.data.name || '') + '(' + d.data.count + ')';\n\t\t});\n\n\n\t arcUpdate\n\t\t.select('title')\n\t\t.text(function(d){return d.data.name + \" (\"+d.data.id+\")\";});\n\t \n\t \n\t function substringName (name) {\n\t\tvar length = name.length;\n\t\treturn (length > limit) ? 
name.substring(0,limit) : name;\n\t }\n\t arcSelection.exit().remove();\n\n\t //Add the labels (Put it after to be on top of arcs)\n\t // var labels = pieE\n\t // \t.append('text');\n\t \n\t \n\t // labels\n\t // \t.merge(updateSelection.selectAll('text'))\n\n\t // labels.exit().remove();\n\t \n\n\t});\n }\n return exports;\n}\n" }, { "alpha_fraction": 0.707446813583374, "alphanum_fraction": 0.7234042286872864, "avg_line_length": 52.71428680419922, "blob_id": "96976be5fe695629c63296160ae4187773794f7d", "content_id": "b94534372ea37da8fd1336f514451023776dd026", "detected_licenses": [], "is_generated": false, "is_vendor": true, "language": "Shell", "length_bytes": 376, "license_type": "no_license", "max_line_length": 223, "num_lines": 7, "path": "/scripts/external/concatenateProgenomeRandIndex.sh", "repo_name": "rplanel/species-clustering", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\nout=\"/env/cns/home/rplanel/my_proj/test/mash/data/rand-index/progenome/rand-indexes.csv\"\n\necho -e \"distance Rand HA MA FM Jaccard\" > $out\n\nfind /env/cns/home/rplanel/my_proj/test/mash/data/runs/progenome/na/21-1000/ -name work -prune -o -name distance-matrices -prune -o -name graph -prune -o -name trees -prune -o -name rand-index.csv -print | xargs cat >> $out\n" } ]
33
ViraAkshat/Virtual_Keyboard
https://github.com/ViraAkshat/Virtual_Keyboard
fc3adeb087dc084a95023a6d8b7cebee9c3b7b45
1092d4ca150f03388ef1dca2a9c22b4906a32f57
85d6179d47dfc6157019e70bb0473613a9b9055a
refs/heads/master
2021-05-17T03:02:27.198702
2020-06-27T11:22:03
2020-06-27T11:22:03
250,588,616
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5534644722938538, "alphanum_fraction": 0.597091555595398, "avg_line_length": 38, "blob_id": "1e22388e40d44549a4826364caf54410196f8ad5", "content_id": "671f7280fe7619c55bc1b2adb677508d3cb5b183", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1169, "license_type": "no_license", "max_line_length": 103, "num_lines": 30, "path": "/Assignment01/task2/test.py", "repo_name": "ViraAkshat/Virtual_Keyboard", "src_encoding": "UTF-8", "text": "import task2\nimport utils\nfrom matplotlib import pyplot as plt\nimport numpy as np\n\ntcases = utils.load('testcases.pkl')\nfor tcase in tcases:\n img_patches, shape, reconstructed = tcases[tcase]\n cleaned_img = task2.reconstruct_from_noisy_patches(img_patches, shape)\n #if tcase == 3 :\n #print(img_patches)\n #print(shape)\n #print(reconstructed.shape)\n #key = [(197, 207, 214, 222), (167, 207, 197, 222), (153, 207, 167, 222), (137, 207, 153, 222)]\n #for i in key:\n # utils.make_fig(img_patches[i], cmap='gray', title='Yours')\n\n try:\n if np.allclose(cleaned_img, reconstructed):\n print('testcase# {} passed'.format(tcase))\n utils.make_fig(reconstructed, cmap='gray', title='Correct')\n utils.make_fig(cleaned_img, cmap='gray', title='Yours')\n plt.show()\n else:\n print('testcase# {} failed'.format(tcase))\n utils.make_fig(reconstructed, cmap='gray', title='Correct')\n utils.make_fig(cleaned_img, cmap='gray', title='Yours')\n plt.show()\n except Exception as e:\n print('testcase# {} failed'.format(tcase))" }, { "alpha_fraction": 0.460195392370224, "alphanum_fraction": 0.5154853463172913, "avg_line_length": 29.83974266052246, "blob_id": "3de1f947e5973df299212154491e29d37a3d0610", "content_id": "cde8cbfda655fe5a4c816749513007e55937038f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 4811, "license_type": "no_license", "max_line_length": 104, "num_lines": 156, "path": "/Paper Keyboard/Virtual_Keyboard.md", "repo_name": "ViraAkshat/Virtual_Keyboard", "src_encoding": "UTF-8", "text": "# How to Use Keyboard\n\n1. Take a colored object to point a key.(I'm using orange color)\n2. 
Tap the key and wait for 1sec to print the key\n\n## Code\n\n```python\nimport numpy as np\nimport cv2\nimport time\n\ncaps = np.array([['!', '@', '#', '$', '%', '^', '&', '*', '(', ')'],\n ['1', '2', '3', '4', '5', '6', '7', '8', '9', '0'],\n ['Q', 'W', 'E', 'R', 'T', 'Y', 'U', 'I', 'O', 'P'],\n ['A', 'S ', 'D', 'F', 'G', 'H', 'J', 'K', 'L', '\\n'],\n ['Z', 'X', 'C', 'V', 'B', 'N', 'M', ' ', ' ', 'caps'],\n [':', ';', '\"', '\\'', ',', '.', '<', '>', '/', '?']])\nkeys = np.array([['!', '@', '#', '$', '%', '^', '&', '*', '(', ')'],\n ['1', '2', '3', '4', '5', '6', '7', '8', '9', '0'],\n ['q', 'w', 'e', 'r', 't', 'y', 'u', 'i', 'o', 'p'],\n ['a', 's ', 'd', 'f', 'g', 'h', 'j', 'k', 'l', '\\n'],\n ['z', 'x', 'c', 'v', 'b', 'n', 'm', ' ', ' ', 'caps'],\n [':', ';', '\"', '\\'', ',', '.', '<', '>', '/', '?']])\ntext = ''\nCAPS, t1, t2, pressed_once, key = False, 0, 0, 0, (0, 0)\n\ncap = cv2.VideoCapture('vid4.mp4')\nfourcc = cv2.VideoWriter_fourcc(*'XVID')\nout = cv2.VideoWriter('output.avi', fourcc, 20.0, (640, 480))\n\n\n# To bring keyboard in perspective\ndef keyboardPerspective(image):\n gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n thg = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY_INV, 71, 7)\n gauss = cv2.GaussianBlur(thg, (5, 5), 0)\n\n contours, h = cv2.findContours(gauss, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n maxArea, maxContour = 0, contours[0]\n for contour in contours:\n area = cv2.contourArea(contour)\n if area > maxArea:\n maxContour = contour\n maxArea = max(area, maxArea)\n\n epsilon = 0.1 * cv2.arcLength(maxContour, True)\n approx = cv2.approxPolyDP(maxContour, epsilon, True)\n pts = np.float32([approx[1][0], approx[0][0], approx[2][0], approx[3][0]])\n d = np.float32([[0, 0], [719, 0], [0, 1279], [719, 1279]])\n matrix = cv2.getPerspectiveTransform(pts, d)\n final = cv2.warpPerspective(image, matrix, (720, 1280))\n final = np.rot90(final)\n\n return final, approx\n\n\n# To bring frame in perspective\ndef perspective(image, pos):\n pts = np.float32([pos[1][0], pos[0][0], pos[2][0], pos[3][0]])\n d = np.float32([[0, 0], [719, 0], [0, 1279], [719, 1279]])\n matrix = cv2.getPerspectiveTransform(pts, d)\n final = cv2.warpPerspective(image, matrix, (720, 1280))\n final = np.rot90(final)\n\n return final\n\n\n# To find coordinates of fingertip\ndef coordinates(img):\n img = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)\n low = np.array([9, 160, 120])\n high = np.array([12, 255, 250])\n\n color = cv2.inRange(img, low, high)\n M = cv2.moments(color)\n if M[\"m00\"] != 0:\n x = int(M[\"m10\"] / M[\"m00\"])\n y = int(M[\"m01\"] / M[\"m00\"])\n else:\n x, y = 0, 0\n\n return x, y\n\n\n# To find which key is pressed\ndef is_key_pressed(x, y):\n global t1, t2, key, pressed_once\n xKey = x//128\n yKey = y//120\n\n enter_new_cell = (key != (yKey, xKey))\n if enter_new_cell:\n t1 = time.monotonic()\n key = (yKey, xKey)\n pressed_once = 0\n else:\n t2 = time.monotonic()\n\n if (t2 - t1) > 0.8 and pressed_once == 0:\n pressed_once += 1\n\n\n# Finds the keyboard position\n_, keyboard = cap.read()\nkeyboard = cv2.resize(keyboard, (1280, 720))\nkeyboard = np.rot90(keyboard)\nkeyboard = np.rot90(keyboard)\nkeyboard, keyPos = keyboardPerspective(keyboard)\n\nwhile cap.isOpened():\n\n ret, frame = cap.read() # Reads the video\n if ret is False:\n break\n\n frame = cv2.resize(frame, (1280, 720)) # Resizing the video\n frame = cv2.rotate(frame, cv2.ROTATE_180)\n # frame2 = np.rot90(frame) # Tweak this based on orientation of video\n # frame2 = np.rot90(frame2)\n 
cv2.imshow('one', frame)\n\n    res = perspective(frame, keyPos)  # Brings keyboard in perspective\n    cv2.imshow('result', res)\n\n    cx, cy = coordinates(res)  # Finds the center of pointer\n    is_key_pressed(cx, cy)  # Checks whether key is pressed\n\n    if pressed_once == 1 and cx != 0:\n        if key == (4, 9):\n            CAPS = not CAPS\n            pressed_once += 1\n        else:\n            if CAPS:\n                text += caps[key]\n            else:\n                text += keys[key]\n            pressed_once += 1\n            print(text)\n\n    y0, dy = 650, 40\n    for i, line in enumerate(text.split('\\n')):\n        y = y0 + i * dy\n        frame = cv2.putText(frame, line, (50, y), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)\n    cv2.imshow('frame', frame)\n    out.write(frame)\n\n    if cv2.waitKey(1) & 0xFF == ord('q'):\n        break\n\ncap.release()\nout.release()\ncv2.destroyAllWindows()\n\n\n```\n" }, { "alpha_fraction": 0.6580844521522522, "alphanum_fraction": 0.6673532724380493, "avg_line_length": 23.299999237060547, "blob_id": "7151c4c177f089431b4906d2bf6bd1cedb7abd50", "content_id": "28f965869db367f7931070fa5320c835a1447a55", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 971, "license_type": "no_license", "max_line_length": 57, "num_lines": 40, "path": "/Assignment01/task3.py", "repo_name": "ViraAkshat/Virtual_Keyboard", "src_encoding": "UTF-8", "text": "import numpy as np\nfrom matplotlib import pyplot as plt\nfrom matplotlib import image\nfrom scipy.cluster.vq import kmeans2\n\ndef K_Clustering(img, k) :\n# Input an image\n#img = plt.imread('image01.jpeg')\n\n# converting to float\n    imgf = img.astype(float)\n    print(imgf)\n\n# Converting to 2d\n    nr, nc, c = img.shape\n    print(nr, nc, c)\n    imgf = imgf.reshape((nr*nc, 3))\n    print(imgf.shape)\n\n#K Clustering\n    centroid, label = kmeans2(imgf, k, minit = '++')\n    imgf = centroid[label]\n    imgf = imgf.reshape((nr, nc, c))\n    imgOut = imgf.astype(np.uint8)\n\n    return imgOut\n\npath = plt.imread(input(\"Enter image path: \"))\n#path = plt.imread('image01.jpeg')\nk = int(input(\"Enter K value: \"))\npathOut = input(\"Enter the path for Output Image: \")\n\nkImg = K_Clustering(path, k) #function call\n# Plotting & Saving an image\nplt.figure()\nplt.imshow(kImg, cmap = 'hot', interpolation = 'nearest')\nplt.axis('off')\nplt.title('k = ' + str(k))\nplt.show()\nimage.imsave(pathOut, kImg)" } ]
3
kazgoto/soracom-remote
https://github.com/kazgoto/soracom-remote
426d4e0e13559daf45a3b5932482b9105b9f88ba
19b508a4386e70ee6e8b04959a552e229a873e50
3ac4c82670352b99334814e86ea15280d335a304
refs/heads/master
2021-01-10T08:48:28.901798
2015-10-26T03:47:54
2015-10-26T03:47:54
44,910,146
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.6012746095657349, "alphanum_fraction": 0.6070933938026428, "avg_line_length": 34.03883361816406, "blob_id": "6b619a1dbae45c89ee3ea56282dcb795d09dfaf4", "content_id": "a1c238179b117d48a47c177a8fc116bcfdbbcaf9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3693, "license_type": "no_license", "max_line_length": 110, "num_lines": 103, "path": "/app.py", "repo_name": "kazgoto/soracom-remote", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\nfrom flask import Flask, render_template, request, redirect, make_response, session, url_for\nimport httplib2\nimport json\n\napi_url = 'https://api.soracom.io/v1'\napp = Flask(__name__)\napp.config['SECRET_KEY'] = 'SORACOM Remote'\n\n# functions\ndef _call_api(path, method, params):\n h = httplib2.Http(\".cache\")\n headers = {\n 'X-Soracom-API-Key' : session['apiKey'],\n 'X-Soracom-Token' : session['token'],\n 'Content-Type' : 'application/json'\n }\n resp, content = h.request(api_url + path, method, json.dumps(params), headers=headers)\n error = ''\n if resp.status != 200:\n error = 'Response is bad: ' + str(resp.status) + ' ' + content\n exit\n\n return error, json.loads(content)\n\ndef _is_authorized():\n username = request.form['username']\n password = request.form['password']\n if username is None or password is None:\n return False\n h = httplib2.Http(\".cache\")\n data = json.dumps({\n \"email\" : request.form.get('username'),\n \"password\" : request.form.get('password')\n })\n headers = {\n 'Content-Type' : 'application/json',\n 'Accept' : 'application/json'\n }\n resp, content = h.request(api_url + '/auth', 'POST', body=data, headers=headers)\n if resp.status != 200:\n return False\n\n resp_json = json.loads(content)\n session['apiKey'] = resp_json['apiKey']\n session['token'] = resp_json['token']\n return True\n\n# routing\[email protected]_request\ndef before_request():\n if session.get('apiKey') is not None:\n return\n if request.path == '/login' or request.path.find('/static') == 0:\n return\n return redirect('/login')\n\[email protected]('/', methods=['GET'])\ndef index():\n error, sims = _call_api('/subscribers', 'GET', {})\n return render_template('index.html', sims=sims, message='', error=error)\n\[email protected]('/login', methods=['GET', 'POST'])\ndef login():\n if request.method == 'POST' and _is_authorized():\n return redirect(url_for('index'))\n return render_template('login.html')\n\[email protected]('/logout', methods=['GET'])\ndef logout():\n session.pop('apiKey', None)\n session.pop('token', None)\n return redirect(url_for('login'))\n\[email protected]('/sim/<imsi>/modify', methods=['GET'])\ndef modify(imsi):\n new_type = request.args.get('new_type')\n old_type = request.args.get('old_type')\n error, sim = _call_api('/subscribers/' + imsi + '/update_speed_class', 'POST', { 'speedClass': new_type })\n if error == '':\n message = 'SIM {} のタイプを {} から {} に変更しました。'.format(imsi, old_type, new_type)\n error, sims = _call_api('/subscribers', 'GET', {})\n return render_template('index.html', sims=sims, message=unicode(message, 'utf-8'), error=error)\n\[email protected]('/sim/<imsi>/activate', methods=['GET'])\ndef activate(imsi):\n error, sim = _call_api('/subscribers/' + imsi + '/activate', 'POST', {})\n if error == '':\n message = 'SIM {} を利用可能にしました。'.format(imsi)\n error, sims = _call_api('/subscribers', 'GET', {})\n return render_template('index.html', sims=sims, message=unicode(message, 'utf-8'), error=error)\n\[email 
protected]('/sim/<imsi>/deactivate', methods=['GET'])\ndef deactivate(imsi):\n error, sim = _call_api('/subscribers/' + imsi + '/deactivate', 'POST', {})\n if error == '':\n message = 'SIM {} を利用停止(休止状態)しました。'.format(imsi)\n error, sims = _call_api('/subscribers', 'GET', {})\n return render_template('index.html', sims=sims, message=unicode(message, 'utf-8'), error=error)\n\n# main\nif __name__ == '__main__':\n app.run(debug=True,host='0.0.0.0',port=80)\n" } ]
1
amankaushik/newsNotify
https://github.com/amankaushik/newsNotify
29d9d88ac8c2424f9ea8994fb65f203359d43631
948433df4628dce1912d87e8cce2ec93896421bf
590693422fee73e08605efcfd7b7163fde5c4272
refs/heads/master
2021-01-17T13:26:00.341127
2016-07-22T22:19:26
2016-07-22T22:19:26
47,683,229
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7333333492279053, "alphanum_fraction": 0.739393949508667, "avg_line_length": 26.66666603088379, "blob_id": "9a90f715f86496756c2ee35a719ceba8bbd5c651", "content_id": "6b2c7e8f8c2a1d920c7f06c1137d1b27203aae85", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 165, "license_type": "no_license", "max_line_length": 67, "num_lines": 6, "path": "/HackerNewsItem.py", "repo_name": "amankaushik/newsNotify", "src_encoding": "UTF-8", "text": "from NewsItem import NewsItem\n\nclass HackerNewsItem(NewsItem):\n\t''' Class Modeling HackerNews news item. Inherits from NewsItem'''\n\tdef __init__(self):\n\t\tself.id = 0" }, { "alpha_fraction": 0.788203775882721, "alphanum_fraction": 0.788203775882721, "avg_line_length": 92.25, "blob_id": "2a24e92bc3478866228e060a9841e1ab6183c160", "content_id": "b90612d560e8a1c4a3c2a6702a92e6925d2e4242", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 373, "license_type": "no_license", "max_line_length": 211, "num_lines": 4, "path": "/README.md", "repo_name": "amankaushik/newsNotify", "src_encoding": "UTF-8", "text": "# newsNotify\nUses the [HackerNewsAPI](https://github.com/HackerNews/API) to get and display news articles and also identify the new, retained and removed articles. Also a front-end written using PyQt to display the articles.\n\nDepreciated. New Repo - [notify-api](https://github.com/amankaushik/notify-api), [notify-frontend](https://github.com/amankaushik/notify-frontend)\n" }, { "alpha_fraction": 0.5454545617103577, "alphanum_fraction": 0.5454545617103577, "avg_line_length": 10.166666984558105, "blob_id": "633549291e3037ac58acd282de82ebcd00a89306", "content_id": "79abfb82c1ba5bf27a0f66f8130a2d9731e54369", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 66, "license_type": "no_license", "max_line_length": 20, "num_lines": 6, "path": "/NewsItem.py", "repo_name": "amankaushik/newsNotify", "src_encoding": "UTF-8", "text": "class NewsItem:\n\turl = \"\"\n\ttitle = \"\"\n\n\tdef __init__(self):\n\t\tpass" }, { "alpha_fraction": 0.7106879353523254, "alphanum_fraction": 0.7131449580192566, "avg_line_length": 29.13888931274414, "blob_id": "233e609ba430e0325e35d7d883da91aa2b8bc621", "content_id": "c282d0d629262dc0af0e97dd84f4db96f88157ee", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3256, "license_type": "no_license", "max_line_length": 93, "num_lines": 108, "path": "/NewsGetter.py", "repo_name": "amankaushik/newsNotify", "src_encoding": "UTF-8", "text": "# ADD LOGGING\n\nimport configparser\nimport json\nimport urllib.request as uReq\nimport urllib.error as uErr\nfrom retryDeco import retry\nfrom collections import namedtuple\nfrom HackerNewsItem import HackerNewsItem\n\nclass HackerNewsAPIImpl:\n\tdef __init__(self):\n\t\tpass\n\n\tdef checkRedundantIDs(self, idList):\n\t\ttry:\n\t\t\tfFile = open('ids.txt', 'r')\n\t\texcept (IOError, OSError) as err:\n\t\t\tprint (\"Could not open file: \" + format(err))\n\t\telse:\n\t\t\toldIDList = []\n\t\t\tfor idd in fFile:\n\t\t\t\toldIDList.append(int(idd))\n\t\tfinally:\n\t\t\tfFile.close()\n\t\ttry:\n\t\t\tfFile = open('ids.txt', 'w')\n\t\texcept (IOError, OSError) as err:\n\t\t\tprint (\"Could not open file: \" + format(err))\n\t\telse:\n\t\t\tfor idd in idList:\n\t\t\t\tfFile.write(str(idd) + '\\n')\n\t\tfinally:\n\t\t\tfFile.close()\n\t\t\n\t\treturn [idd for idd 
in idList if idd not in oldIDList]\n\n\tdef getStoryIDs(self, topStoriesBaseUrl, reponseFormat, limit):\n\t\tstoryIDs = []\n\t\t\n\t\ttopStoriesFinalUrl = topStoriesBaseUrl + reponseFormat\n\t\tresponse = getResponse(topStoriesFinalUrl)\n\t\tstoryIDs = json.loads(response.read().decode(\"utf-8\"))\n\t\tstoryIDs = storyIDs[:int(limit)]\n\t\treturn storyIDs\n\n\tdef getStories(self, idList, storyBaseUrl, reponseFormat):\n\t\thackerNewsItemList = []\n\t\tcount = 1;\n\t\tfor id in idList:\n\t\t\tstoryFinalUrl = storyBaseUrl + str(id) + reponseFormat\n\t\t\tresponse = getResponse(storyFinalUrl)\n\t\t\tstoryData = json.loads(response.read().decode(\"utf-8\"))\n\t\t\tif storyData is not None and 'url' in list(storyData.keys()):\n\t\t\t\thackerNewsItem = HackerNewsItem()\n\t\t\t\t# JSON response inconsistant, check if all keys exist for every response\n\t\t\t\thackerNewsItem.id = storyData['id']\n\t\t\t\thackerNewsItem.title = storyData['title']\n\t\t\t\thackerNewsItem.url = storyData['url']\n\t\t\t\thackerNewsItemList.append(hackerNewsItem)\n\t\t\t\tprint('Story ' + str(count) + ' retrived.')\n\t\t\t\tcount += 1\n\t\treturn hackerNewsItemList\n\nclass Utils:\n\tdef wiriteJSONToFile(self, filename, collection):\n\t\tprint('Writing Stories to File ...')\n\t\twith open(filename, 'w', encoding='utf8') as outF:\n\t\t\tfor item in collection:\n\t\t\t\tjson.dump(item.__dict__, outF)\n\t\tprint('Stories written to file')\n\n@retry(uErr, tries=4, delay=3, backoff=2)\ndef getResponse(url):\n\ttry:\n\t\treturn uReq.urlopen(url)\n\texcept HTTPError as err:\n\t\tprint ('HTTPError: '.format(err))\n\texcept URLError as err:\n\t\tprint ('URLError: '.fromat(err))\n\nif __name__ == '__main__':\n\tprint('Reading Configuration File ... ')\n\tconfig = configparser.ConfigParser()\n\tconfig.read('config.ini')\n\thnProperty = config['hackernews']\n\t\n\tstoryBaseUrl = hnProperty['storyBaseUrl']\n\treponseFormat = hnProperty['fromat']\n\ttopStoriesBaseUrl = hnProperty['topStoriesBaseUrl']\n\tlimit = int(config['default']['hnLimit'])\n\n\thackerNewsAPIImpl = HackerNewsAPIImpl()\n\tprint('Getting Story IDs ... ')\n\tidList = hackerNewsAPIImpl.getStoryIDs(topStoriesBaseUrl, reponseFormat, limit)\n\tprint('Story IDs retrived ... ')\n\tprint('checking for redundancy ...')\n\tfinalIDList = hackerNewsAPIImpl.checkRedundantIDs(idList)\t\n\tprint('Checked.')\n\n\tif not finalIDList:\n\t\tprint ('No new stories')\n\telse:\n\t\tprint('Getting Story Content ... ')\n\t\thackerNewsItemList = hackerNewsAPIImpl.getStories(finalIDList, storyBaseUrl, reponseFormat)\n\t\tprint('All Stories retrived.')\n\t\tutil = Utils()\n\t\tutil.wiriteJSONToFile('stories.txt', hackerNewsItemList)\n\t" }, { "alpha_fraction": 0.7584269642829895, "alphanum_fraction": 0.7752808928489685, "avg_line_length": 24.571428298950195, "blob_id": "c8658c7ab67d8e3e7c45c298cf3db7b1f25bf804", "content_id": "ec9c4909bddcc09f7a36ffb928a8f794e20d2f82", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "INI", "length_bytes": 178, "license_type": "no_license", "max_line_length": 68, "num_lines": 7, "path": "/config.ini", "repo_name": "amankaushik/newsNotify", "src_encoding": "UTF-8", "text": "[default]\nhnLimit = 2\n\n[hackernews]\ntopStoriesBaseUrl = https://hacker-news.firebaseio.com/v0/topstories\nstoryBaseUrl = https://hacker-news.firebaseio.com/v0/item/\nfromat = .json" } ]
5
Isi78/mysite
https://github.com/Isi78/mysite
fe230665556af86b845be43658df385023db296f
f328ad83cd5618654e3522d3af727739618f9ed5
c49d38094a95368df35e2484f9b7f811f174873c
refs/heads/master
2020-03-30T06:39:46.629273
2018-09-29T15:47:15
2018-09-29T15:47:15
150,878,475
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6709401607513428, "alphanum_fraction": 0.6709401607513428, "avg_line_length": 18.5, "blob_id": "bbbb9bd2d1e7f2a35056494b3a228d1580b7f3ce", "content_id": "a052375074bd40c94640ace58e841e820306c86c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 468, "license_type": "no_license", "max_line_length": 47, "num_lines": 24, "path": "/posts/views.py", "repo_name": "Isi78/mysite", "src_encoding": "UTF-8", "text": "from django.shortcuts import render\n\n\n\ndef index(request):\n return render(request, 'index.html', {})\n\ndef music(request):\n return render(request, 'music.html', {})\n\ndef contatti(request):\n return render(request, 'contatti.html', {})\n\ndef guitar(request):\n return render(request, 'guitar.html', {})\n\ndef news(request):\n return render(request, 'news.html', {})\n\ndef login(request):\n return render(request, 'login.html', {})\n\n\n# Create your views here.\n" }, { "alpha_fraction": 0.6081081032752991, "alphanum_fraction": 0.6081081032752991, "avg_line_length": 27.538461685180664, "blob_id": "4a1458287982d24742fb66bb40df8d490fdca51f", "content_id": "35febf989b962c9d43509bbeb569e6e94130565c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 370, "license_type": "no_license", "max_line_length": 57, "num_lines": 13, "path": "/posts/urls.py", "repo_name": "Isi78/mysite", "src_encoding": "UTF-8", "text": "from django.conf.urls import url\nfrom . import views\n\nurlpatterns = [\n url(r'^$', views.index, name='index'),\n url(r'^music/$', views.music, name='music'),\n url(r'^contatti/$', views.contatti, name='contatti'),\n url(r'^guitar/$', views.guitar, name='guitar'),\n url(r'^news/$', views.news, name='news'),\n url(r'^login/$', views.login, name='login'),\n\n\n]" }, { "alpha_fraction": 0.6849315166473389, "alphanum_fraction": 0.698630154132843, "avg_line_length": 28.200000762939453, "blob_id": "2e3cd642b38b6927d58ba952572118cedba0bd8f", "content_id": "48fb6836ce3a14d88da00c505c5251fcaad995d8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 146, "license_type": "no_license", "max_line_length": 42, "num_lines": 5, "path": "/script/bin/django-admin.py", "repo_name": "Isi78/mysite", "src_encoding": "UTF-8", "text": "#!/home/isi/mysite/script/bin/python3.6\nfrom django.core import management\n\nif __name__ == \"__main__\":\n management.execute_from_command_line()\n" } ]
3
ItsSaravananRajendran/DigitalNotes
https://github.com/ItsSaravananRajendran/DigitalNotes
8573eea5dbd12cbee0ac07dbae3e7d19e20ac068
34725c4cac42c42adbe66646113936feab229f64
34025490a6b93706f10968e756ade2df553ce68e
refs/heads/master
2021-09-04T03:18:21.343537
2018-01-15T06:57:15
2018-01-15T06:57:15
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6758530139923096, "alphanum_fraction": 0.7099737524986267, "avg_line_length": 35.19047546386719, "blob_id": "5e46035d08ef1ea4b8577f5296335b4b42eb79fa", "content_id": "8916c8c9309cf8904f127e95b8f8b1318df75c0d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 762, "license_type": "no_license", "max_line_length": 217, "num_lines": 21, "path": "/README.md", "repo_name": "ItsSaravananRajendran/DigitalNotes", "src_encoding": "UTF-8", "text": "[![license](https://img.shields.io/github/license/mashape/apistatus.svg)]() \n\n# DigitalNotes\nThings written in android will be transferred to PC, where it is displayed like notes. \n\n# Dependencies\n\n * PIP\n * ADB\n * matplotlib\n\n\n# How to use?\n\n1. Install ADB [Windows](https://www.howtogeek.com/125769/how-to-install-and-use-abd-the-android-debug-bridge-utility/) & [Linux](https://www.linuxbabe.com/ubuntu/how-to-install-adb-fastboot-ubuntu-16-04-16-10-14-04).\n2. Install the mobile app from app/build/outputs/apk using the command ```adb install app-debug.apk```\n3. ``` git clone https://github.com/thunderbo1t/DigitalNotes.git ```\n4. ``` cd DigitalNotes ```\n5. ``` pip install -r requirements.txt ```\n6. Open the app in phone \n7. ``` python server.py ``` \n\n" }, { "alpha_fraction": 0.7192118167877197, "alphanum_fraction": 0.7364531755447388, "avg_line_length": 22.941177368164062, "blob_id": "edbee143dddb9d39ee1e3b0915f6dd0002d27dc1", "content_id": "803083f2e53cee68bbfd0b4833af4d9ecc823ee3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 406, "license_type": "no_license", "max_line_length": 68, "num_lines": 17, "path": "/test.py", "repo_name": "ItsSaravananRajendran/DigitalNotes", "src_encoding": "UTF-8", "text": "import numpy\nfrom matplotlib import pyplot, transforms\n\ndatax= [I for I in range(100)]\ndatay = datax\n\n# first of all, the base transformation of the data points is needed\nbase = pyplot.gca().transData\nrot = transforms.Affine2D().rotate_deg(270)\n\n# define transformed line\nline = pyplot.plot(datax,datay, 'r-', transform= rot + base)\n\n# or alternatively, use:\n# line.set_transform(rot + base)\n\npyplot.show()" }, { "alpha_fraction": 0.5549450516700745, "alphanum_fraction": 0.5947802066802979, "avg_line_length": 21.75, "blob_id": "d464228c6d7c453c7bf3f52d2417778896345c7d", "content_id": "7cbba842469288f67364c9c7420b4607d96406f8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 728, "license_type": "no_license", "max_line_length": 48, "num_lines": 32, "path": "/server.py", "repo_name": "ItsSaravananRajendran/DigitalNotes", "src_encoding": "UTF-8", "text": "import socket\nimport os\nimport matplotlib.pyplot as plt\nfrom matplotlib import transforms\n\ndef shift(data):\n data = data.replace(\"\\n\",\"\")\n points = data.split(',')\n x = []\n y = []\n for pts in points[1:]:\n x_, y_ = pts.split('+')\n x.append(x_)\n y.append(-1*int(y_))\n base = plt.gca().transData\n rot = transforms.Affine2D().rotate_deg(-180)\n\n plt.plot(x,y,'r-')\n #plt.plot(x,y, 'r-', transform= rot + base)\n plt.show()\n\nos.system(\"adb forward tcp:15120 tcp:15120\")\ncon = socket.socket()\ncon.connect(('localhost',15120))\ncon.send(\"Testing\\n\")\ndata = con.recv(1024)\nif data == 'Testing\\n':\n print \"Success\"\nwhile (True):\n data = con.recv(4096)\n print data\n shift(data)\n" } ]
3
qkrwjdan/hanyangProject
https://github.com/qkrwjdan/hanyangProject
1b7f43a7dc7cdb66566bec05b9c0654fc147b8c8
4acfc2e6558e3640865982f8432f02e4901e7bd9
7eca4236c576bfb9683d0fd430522d2588bb8da5
refs/heads/master
2021-07-18T06:27:56.471277
2021-07-02T04:14:33
2021-07-02T04:14:33
224,994,229
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.4377104341983795, "alphanum_fraction": 0.44781145453453064, "avg_line_length": 20.214284896850586, "blob_id": "d62a4a261bf2e461d5b64c29fefa2db6ab0cc537", "content_id": "a8c8ca95d9e821e86c6dcf1ade9d87a51b31e365", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "HTML", "length_bytes": 630, "license_type": "no_license", "max_line_length": 60, "num_lines": 28, "path": "/templates/user.html", "repo_name": "qkrwjdan/hanyangProject", "src_encoding": "UTF-8", "text": "{% extends \"layout.html\" %}\n\n{% block body %}\n<div id=\"main-menu\">\n <ul id=\"nav\">\n {% if info[0] == 1 %}\n <li><a href='/login/user/seller'>Seller</a></li>\n {% endif %}\n {% if info[1] == 1 %}\n <li><a href='/login/user/customer'>Customer</a></li>\n {% endif %}\n {% if info[2] == 1 %}\n <li><a href='/login/user/delivery'>Delivery</a></li>\n {% endif %}\n </ul>\n</div>\n\n<div id=\"container\">\n <div id=\"content\">\n <div id=\"tt\">안녕하세요 {{ k }}님</div>\n <div>상단의 메뉴를 클릭해주세요!</div>\n </div>\n</div>\n\n\n\n \n{% endblock %}\n" }, { "alpha_fraction": 0.49854227900505066, "alphanum_fraction": 0.5043731927871704, "avg_line_length": 21.491804122924805, "blob_id": "c3a1e71f6aa7624af6561ad4d22b130bd56f4eff", "content_id": "73016cc51267ad4cc6baa6eba3d4192f95bd084a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "HTML", "length_bytes": 1450, "license_type": "no_license", "max_line_length": 78, "num_lines": 61, "path": "/templates/ordercheck.html", "repo_name": "qkrwjdan/hanyangProject", "src_encoding": "UTF-8", "text": "{% extends \"layout.html\" %}\n\n{% block body %}\n<div id=\"main-menu\">\n <ul id=\"nav\">\n {% if info[0] == 1 %}\n <li><a href='/login/user/seller'>Seller</a></li>\n {% endif %}\n {% if info[1] == 1 %}\n <li><a href='/login/user/customer'>Customer</a></li>\n {% endif %}\n {% if info[2] == 1 %}\n <li><a href='/login/user/delivery'>Delivery</a></li>\n {% endif %}\n </ul>\n</div>\n\n<div id=\"container\">\n\t\t<div id=\"content\">\n<div id=\"tt\">배달원 할당</div>\n\n<table class=\"bbs-table\">\n<tr>\n <th>배달원 did</th>\n <th>배달원 이름</th>\n <th>배달원 전화번호</th>\n <th>할당</th>\n</tr>\n{% for i in view %}\n<tr>\n <td style=\"text-align: center;\">{{ i['del_id'] }}</td>\n <td style=\"text-align: center;\">{{ i['name'] }}</td>\n <td style=\"text-align: center;\">{{ i['phone'] }}</td>\n <td style=\"text-align: center;\">\n <form action='/login/user/seller/store/ordercheck/real' method='post'>\n <input type='hidden' name=\"did\" value=\"{{i['del_id']}}\">\n <input type='hidden' name=\"orderinfo\" value=\"{{orderinfo}}\">\n <input type=\"submit\" value=\"할당\">\n </form>\n </td>\n</tr>\n{% endfor %}\n\n</table>\n\n\n</div>\n</div>\n \n<div id=\"sidebar\">\n\t<h1>Seller</h1>\n\t<ul>\n\t\t<li><a href='/login/user/seller'>소유중인 가게 리스트</a></li>\n\t\t<li><a href='/login/user/schange'>개인 정보 변경</a></li>\n\t</ul>\n</div>\n\n\n\n \n{% endblock %}\n" }, { "alpha_fraction": 0.5843881964683533, "alphanum_fraction": 0.607594907283783, "avg_line_length": 18.75, "blob_id": "7f8e8792b539d455ea46badc3b3fa59be99f6cbb", "content_id": "156ecc1172da91dcaa7d626f1af905cc35a0a534", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 474, "license_type": "no_license", "max_line_length": 58, "num_lines": 24, "path": "/test.py", "repo_name": "qkrwjdan/hanyangProject", "src_encoding": "UTF-8", "text": "import pymysql\n\ndb_connector = {\n 'host' : 'localhost',\n 'port' : 3306,\n 'user' : 'root',\n 'passwd' : 
'web0206A',\n    'db' : 'testproject',\n    'charset' : 'utf8'\n}\n\n\nconn = pymysql.connect(**db_connector)\ncur = conn.cursor(pymysql.cursors.DictCursor)\nsql = \"INSERT INTO ex VALUES(now(),20)\"\ncur.execute(sql)\n\nfor i in range (21,29):\n    sql = \"INSERT INTO ex VALUES(now(),{i})\".format(i = i)\n    cur.execute(sql)\n\nconn.commit()\nconn.close()\nprint('ALL DONE')\n" }, { "alpha_fraction": 0.6163897514343262, "alphanum_fraction": 0.6192365288734436, "avg_line_length": 29.053295135498047, "blob_id": "6063e60df95973faf47ef07d78413d7dec2c9aed", "content_id": "e920241e9b3d5ca981c389915bd1f5359ef7fed0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 23408, "license_type": "no_license", "max_line_length": 226, "num_lines": 713, "path": "/app.py", "repo_name": "qkrwjdan/hanyangProject", "src_encoding": "UTF-8", "text": "from flask import Flask, redirect, render_template, request\nimport pymysql\nimport datetime\n\napp = Flask(__name__)\n\nuserid = {\n    'id' : None,\n    'email' : None,\n    'passwd' : None,\n    'storesid' : None\n}\nuserinfo = [0, 0, 0] # seller, customer, delivery\nmenulist = [] # menu list (cart)\n\n\ndb_connector = {\n    'host' : 'localhost',\n    'port' : 3306,\n    'user' : 'root',\n    'passwd' : '1234567890',\n    'db' : 'project',\n    'charset' : 'utf8'\n}\n\n@app.route(\"/\")\ndef index():\n    userid['email'] = None\n    userid['passwd'] = None\n    userid['id'] = None\n    userinfo[0] = 0\n    userinfo[1] = 0\n    userinfo[2] = 0\n    return render_template(\"login.html\")\n\n@app.route(\"/login\", methods=['GET', 'POST'])\ndef login():\n    email = request.form.get('email')\n    passwd = request.form.get('pw')\n    \n    conn = pymysql.connect(**db_connector)\n    cur = conn.cursor(pymysql.cursors.DictCursor)\n\n    sql = f\"SELECT sellers_id FROM sellers WHERE passwd = '{passwd}' AND email = '{email}'\"\n    cur.execute(sql)\n    seller = cur.fetchall()\n    sql = f\"SELECT customer_id FROM customers WHERE passwd = '{passwd}' AND email = '{email}'\"\n    cur.execute(sql)\n    customer = cur.fetchall()\n    sql = f\"SELECT del_id FROM delivery WHERE passwd = '{passwd}' AND email = '{email}'\"\n    cur.execute(sql)\n    delivery = cur.fetchall()\n\n    if (not seller) and (not customer) and (not delivery):\n        return render_template('error.html')\n    if seller:\n        userinfo[0] = 1\n    if customer:\n        userinfo[1] = 1\n    if delivery:\n        userinfo[2] = 1\n\n    userid['email'] = email\n    userid['passwd'] = passwd\n\n    if seller :\n        userid['id'] = seller[0]['sellers_id']\n    elif customer :\n        userid['id'] = customer[0]['customer_id']\n    elif delivery :\n        userid['id'] = delivery[0]['del_id']\n    else :\n        userid['id'] = None\n    conn.close()\n    return redirect(\"/login/user\")\n\n@app.route(\"/login/user\", methods=['GET', 'POST'])\ndef user():\n    conn = pymysql.connect(**db_connector)\n    cur = conn.cursor(pymysql.cursors.DictCursor)\n\n    if userinfo[0] == 1:\n        sql = \"SELECT name FROM sellers WHERE sellers_id = %s\" \n    if userinfo[1] == 1:\n        sql = \"SELECT name FROM customers WHERE customer_id = %s\"\n    if userinfo[2] == 1:\n        sql = \"SELECT name FROM delivery WHERE del_id = %s\"\n    \n    cur.execute(sql,(userid[\"id\"]))\n    name = cur.fetchall()\n\n    conn.close()\n\n    return render_template(\"user.html\", info=userinfo, k=name[0]['name'])\n\n\n# ========== Customer ==========\n\n# Customer account page\n@app.route(\"/login/user/customer\", methods=['GET', 'POST'])\ndef customer():\n    \"\"\"\n    Customer account management page.\n    Shows the current password, name and address.\n    \"\"\"\n    conn = pymysql.connect(**db_connector)\n    cur = conn.cursor(pymysql.cursors.DictCursor)\n\n    sql = \"SELECT * FROM customers where customer_id = {userid}\".format(userid = userid['id'])\n    cur.execute(sql)\n\n    customer = cur.fetchall()\n\n    conn.close()\n\n    return render_template(\"customer.html\", info=userinfo, customer=customer[0])\n\n# Change the customer's password\n@app.route(\"/login/user/customer/pw\", methods = ['GET', 'POST'])\ndef cpw():\n    \"\"\"\n    Changes the logged-in customer's password.\n    \"\"\"\n    newPw = request.form.get('passwd')\n\n    conn = pymysql.connect(**db_connector)\n    cur = conn.cursor(pymysql.cursors.DictCursor)\n\n    sql = \"UPDATE customers SET passwd = '{newPw}' WHERE customer_id = {userid}\".format(newPw = newPw,userid = userid['id'])\n\n    cur.execute(sql)\n    conn.commit()\n    conn.close()\n\n    return redirect(\"/login/user\")\n\n# Change the customer's name\n@app.route(\"/login/user/customer/name\", methods=['GET', 'POST'])\ndef cname():\n    \"\"\"\n    Changes the logged-in customer's name.\n    \"\"\"\n    newName = request.form.get('name')\n\n    conn = pymysql.connect(**db_connector)\n    cur = conn.cursor(pymysql.cursors.DictCursor)\n\n    sql = \"UPDATE customers SET name = '{newName}' WHERE customer_id = {userid}\".format(newName = newName,userid = userid['id'])\n\n    cur.execute(sql)\n    conn.commit()\n    conn.close()\n    return redirect(\"/login/user\")\n\n# Change the customer's address\n@app.route(\"/login/user/customer/address\", methods=['GET', 'POST'])\ndef addchan():\n    \"\"\"\n    Changes the logged-in customer's address.\n    \"\"\"\n    newAdd= request.form.get('address')\n\n    conn = pymysql.connect(**db_connector)\n    cur = conn.cursor(pymysql.cursors.DictCursor)\n\n    sql = \"UPDATE customers SET address = '{newAdd}' WHERE customer_id = {userid}\".format(newAdd = newAdd,userid = userid['id'])\n\n    cur.execute(sql)\n    conn.commit()\n    conn.close()\n    return redirect(\"/login/user\")\n\n# Customer purchase page\n@app.route(\"/login/user/customer/buy\", methods=['GET', 'POST'])\ndef buy():\n    \"\"\"\n    Purchase page.\n    Looks up the logged-in customer's address for the store search.\n    \"\"\"\n    conn = pymysql.connect(**db_connector)\n    cur = conn.cursor(pymysql.cursors.DictCursor)\n\n    sql = \"SELECT address FROM customers WHERE customer_id = {userid}\".format(userid = userid['id'])\n\n    cur.execute(sql)\n    caddress = cur.fetchall()\n    conn.close()\n\n    return render_template(\"buy.html\", info=userinfo, cus_addr=caddress[0]['address'])\n\n# Search stores by the customer's default address\n@app.route(\"/login/user/schange/consearch\", methods=['GET', 'POST'])\ndef consearch():\n    \"\"\"\n    Search stores by the customer's default address.\n    Finds stores close to the logged-in customer's address.\n    \"\"\"\n    addr = request.form.get('address')\n    conn = pymysql.connect(**db_connector)\n    cur = conn.cursor(pymysql.cursors.DictCursor)\n\n    sql = \"SELECT * FROM stores WHERE address = '{addr}'\".format(addr = addr)\n\n    cur.execute(sql)\n    store = cur.fetchall()\n    conn.close()\n\n    return render_template(\"storesearch.html\", info=userinfo, store=store)\n\n# Search stores by name\n@app.route(\"/login/user/schange/namesearch\", methods=['GET', 'POST'])\ndef namesearch():\n    \"\"\"\n    Search stores by name (partial matches allowed).\n    \"\"\"\n    name = request.form.get('name')\n    conn = pymysql.connect(**db_connector)\n    cur = conn.cursor(pymysql.cursors.DictCursor)\n\n    sql = \"SELECT * FROM stores WHERE sname LIKE '%{name}%'\".format(name = name)\n    \n\n    cur.execute(sql)\n    store = cur.fetchall()\n    conn.close()\n\n    return render_template(\"storesearch.html\", info=userinfo, store=store)\n\n# Search stores by an entered address\n@app.route(\"/login/user/schange/addresssearch\", methods=['GET', 'POST'])\ndef addresssearch():\n    \"\"\"\n    Search stores by an entered address (partial matches allowed).\n    \"\"\"\n    keyaddr = request.form.get('keyaddr')\n    conn = pymysql.connect(**db_connector)\n    cur = conn.cursor(pymysql.cursors.DictCursor)\n\n    sql = \"SELECT * FROM stores WHERE address LIKE '%{addr}%'\".format(addr = keyaddr)\n\n    cur.execute(sql)\n    store = cur.fetchall()\n    conn.close()\n    return render_template(\"storesearch.html\", info=userinfo, store=store)\n\n# Store info, menu info, shopping cart\n@app.route(\"/login/user/customer/storebuy\", methods=['GET', 'POST'])\ndef storebuy():\n    buystoresid = request.form.get('storesid')\n    o_menu = request.form.get('menu')\n    o_num = request.form.get('num')\n    o_sid = request.form.get('sid')\n\n    conn = pymysql.connect(**db_connector)\n    cur = conn.cursor(pymysql.cursors.DictCursor)\n\n    if o_menu and o_num:\n        sql = \"SELECT * FROM menu WHERE name = '{mname}' AND store_id = {sid}\".format(mname = o_menu,sid = o_sid)\n        cur.execute(sql)\n        o_menu_id = cur.fetchone()['menu_id']\n    \n    if not buystoresid:\n        buystoresid = o_sid\n    if o_num and o_num != \"0\" :\n        menulist.append([o_menu, o_num,o_menu_id])\n\n    \"\"\"\n    Store info, menu info and cart page.\n    Shows the store and its menu.\n    \"\"\"\n\n    sql = \"SELECT * FROM stores WHERE store_id = '{storeid}'\".format(storeid = buystoresid)\n    cur.execute(sql)\n    store = cur.fetchall()\n\n    sql = \"SELECT * FROM menu WHERE store_id = '{storeid}'\".format(storeid = buystoresid)\n    cur.execute(sql)\n    menu = cur.fetchall()\n    conn.close()\n\n    return render_template(\"order.html\", info=userinfo, store=store[0], menu=menu, menulist=menulist, sid=buystoresid)\n\n# Review the ordered items and pick a payment method\n@app.route(\"/login/user/customer/storebuy/pay\", methods = ['GET', 'POST'])\ndef pay():\n    buystoresid = request.form.get('sid')\n    if not menulist:\n        return render_template(\"payerror.html\")\n\n    \"\"\"\n    Payment method.\n    Shows the logged-in customer's payment methods so the order can be paid the preferred way.\n    \"\"\"\n\n    conn = pymysql.connect(**db_connector)\n    cur = conn.cursor(pymysql.cursors.DictCursor)\n    sql = \"SELECT * FROM payment WHERE customer_id = '{userid}'\".format(userid = userid['id'])\n    cur.execute(sql)\n    payment = cur.fetchall()\n    conn.close()\n\n    return render_template(\"realpay.html\", info=userinfo, sid=buystoresid, payment=payment, menulist=menulist)\n\n# Insert the order and its orderdetail rows\n@app.route(\"/login/user/customer/storebuy/pay/done\", methods=['GET', 'POST'])\ndef realpay():\n    \"\"\"\n    Inserts the customer's order and the detail rows of the order.\n    \"\"\"\n    sid = request.form.get('sid')\n    pay_type = request.form.get('pay_type')\n    pay_num = request.form.get('pay_num')\n    pay_id = request.form.get('pay_id')\n\n    conn = pymysql.connect(**db_connector)\n    cur = conn.cursor(pymysql.cursors.DictCursor)\n    sql = \"INSERT INTO orders (order_time,delivery_done,del_id,payment_id,customer_id,store_id) VALUES (now(),0,null,{pay_id},{cus_id},{sid})\".format(pay_id = pay_id,cus_id = userid['id'],sid = sid)\n    cur.execute(sql)\n    conn.commit()\n    sql = \"SELECT * FROM orders WHERE payment_id = {payid} AND customer_id = {cusid} AND order_time BETWEEN Date_sub(now(),INTERVAL 30 SECOND) AND Date_Add(now(),INTERVAL 30 SECOND)\".format(payid = pay_id,cusid = userid['id'])\n    cur.execute(sql)\n    order_id = cur.fetchone()['order_id']\n\n    for i in menulist:\n        sql = \"INSERT INTO orderdetail (order_id,menu_id,quantity) VALUES ({orderid},{menuid},{quantity})\".format(orderid=order_id,menuid = i[2],quantity = i[1])\n        cur.execute(sql)\n        conn.commit()\n    conn.close()\n    \n\n    del menulist[:]\n    return redirect(\"/login/user/customer\")\n\n# Order history page\n@app.route(\"/login/user/customer/order\", methods=['GET', 'POST'])\ndef cusorder():\n    \"\"\"\n    Order history page.\n    Shows the store name, total item count, payment method, order time and delivery status of each order.\n    \"\"\"\n    conn = pymysql.connect(**db_connector)\n    cur = conn.cursor(pymysql.cursors.DictCursor)\n    sql = \"SELECT * FROM orders WHERE customer_id = {cusid}\".format(cusid = userid['id']) \n    cur.execute(sql)\n    orders = cur.fetchall()\n    snames = []\n    payments = []\n    orderdetails = []\n    for_counter = len(orders)\n\n    for i in orders:\n        quantity = 0\n        sql = \"SELECT * FROM stores WHERE store_id = {store_id}\".format(store_id = i['store_id'])\n        cur.execute(sql)\n        sname = cur.fetchone()['sname']\n        snames.append(sname)\n        sql = \"SELECT * FROM payment WHERE payment_id = {payid}\".format(payid = i['payment_id'])\n        cur.execute(sql)\n        pay = cur.fetchone()['pay_type']\n        payments.append(pay)\n        sql = \"SELECT * FROM orderdetail WHERE order_id = {orderid}\".format(orderid = i['order_id'])\n        cur.execute(sql)\n        orderdetail = cur.fetchall()\n        for detail in orderdetail:\n            quantity = quantity + detail['quantity']\n        orderdetails.append(quantity)\n    \n    conn.close()\n\n    return render_template(\"payhistory.html\", info=userinfo, ord=orders,pay = payments,ordd = orderdetails,sn = snames,for_counter = for_counter)\n\n\n# ========== Seller ==========\n\n# Change the seller's profile\n@app.route(\"/login/user/schange\", methods=['GET', 'POST'])\ndef schange():\n    \"\"\"\n    Seller profile page.\n    Shows the current password and name.\n    \"\"\"\n    conn = pymysql.connect(**db_connector)\n    cur = conn.cursor(pymysql.cursors.DictCursor)\n\n    sql = \"SELECT * FROM sellers WHERE sellers_id = {userid}\".format(userid = userid['id'])\n    cur.execute(sql)\n    seller = cur.fetchall()\n    conn.close()\n\n    sname = seller[0]['name']\n\n    return render_template(\"schange.html\", info=userinfo, name=sname, passwd=userid['passwd'])\n\n# Change the seller's password\n@app.route(\"/login/user/schange/pw\", methods=['GET', 'POST'])\ndef spw():\n    \"\"\"\n    Changes the logged-in seller's password.\n    \"\"\"\n    changedPw = request.form.get('passwd')\n\n    conn = pymysql.connect(**db_connector)\n    cur = conn.cursor(pymysql.cursors.DictCursor)\n\n    sql = \"UPDATE sellers SET passwd = '{changedPw}' WHERE sellers_id = {userid}\".format(changedPw = changedPw,userid = userid['id'])\n    cur.execute(sql) \n    \n    conn.commit()\n    conn.close()\n\n    userid['passwd'] = changedPw\n\n    return redirect(\"/login/user\")\n\n# Change the seller's name\n@app.route(\"/login/user/schange/name\", methods=['GET', 'POST'])\ndef schname():\n    \"\"\"\n    Changes the logged-in seller's name.\n    \"\"\"\n    changedName = request.form.get('name')\n\n    conn = pymysql.connect(**db_connector)\n    cur = conn.cursor(pymysql.cursors.DictCursor)\n\n    sql = \"UPDATE sellers SET name = '{changedName}' WHERE sellers_id = {userid}\".format(changedName = changedName,userid = userid['id'])\n    cur.execute(sql)\n\n    conn.commit()\n\n    conn.close()\n\n    return redirect(\"/login/user\")\n\n# List of owned stores\n@app.route(\"/login/user/seller\", methods=['GET', 'POST'])\ndef seller():\n    \"\"\"\n    Shows the list of stores owned by the logged-in seller.\n    \"\"\"\n    conn = pymysql.connect(**db_connector)\n    cur = conn.cursor(pymysql.cursors.DictCursor)\n\n    sql = \"SELECT * FROM stores WHERE seller_id = {userid}\".format(userid = userid['id']) \n    cur.execute(sql)\n\n    store = cur.fetchall()\n    conn.close()\n\n    return render_template(\"seller.html\", info=userinfo, store=store)\n\n# Store info, menu info, current orders\n@app.route(\"/login/user/seller/store\", methods=['GET', 'POST'])\ndef store():\n    sid = request.form.get('sid')\n    emails = []\n    pay_types = []\n    if sid:\n        userid[\"storesid\"] = sid\n    sid = userid[\"storesid\"]\n    \"\"\"\n    Store info, menu info and current orders page.\n    Shows the store, its menu and its open orders.\n    \"\"\"\n    conn = pymysql.connect(**db_connector)\n    cur = conn.cursor(pymysql.cursors.DictCursor)\n\n    sql = \"SELECT * FROM stores WHERE store_id = {storesid}\".format(storesid = userid['storesid'])\n    cur.execute(sql)\n    store = cur.fetchall()\n    sql = \"SELECT * FROM menu WHERE store_id = {storesid}\".format(storesid = userid['storesid'])\n    cur.execute(sql)\n    menu = cur.fetchall()\n    sql = \"SELECT * FROM orders WHERE store_id = {storesid}\".format(storesid = userid['storesid'])\n    cur.execute(sql)\n    order = cur.fetchall()\n    for_counter = len(order)\n    for i in order:\n        sql = \"SELECT email FROM customers WHERE customer_id = {cusid}\".format(cusid = i['customer_id'])\n        cur.execute(sql)\n        email = cur.fetchone()\n        emails.append(email)\n        sql = \"SELECT pay_type FROM payment WHERE payment_id = {payid}\".format(payid = i['payment_id'])\n        cur.execute(sql)\n        pay_type = cur.fetchone()\n        pay_types.append(pay_type)\n\n    conn.close()\n\n    return render_template(\"store.html\", info=userinfo, store=store[0], menu=menu, sid=sid, order=order,emails = emails,pay_types = pay_types,f = for_counter)\n\n\n# Rename a menu item\n@app.route(\"/login/user/seller/store/menuchan\", methods=['GET', 'POST'])\ndef menuchan():\n    sid = request.form.get('sid')\n    menuname = request.form.get('menu')\n    newname = request.form.get('newname')\n\n    \"\"\"\n    Renames a menu item of the store to the newly entered name.\n    \"\"\"\n\n    conn = pymysql.connect(**db_connector)\n    cur = conn.cursor(pymysql.cursors.DictCursor)\n    \n    sql = \"SELECT * FROM menu WHERE store_id = {storesid} AND name = '{menuname}'\".format(storesid = sid,menuname = menuname)\n    cur.execute(sql)\n    menu = cur.fetchall()\n\n    sql = \"UPDATE menu SET name = '{newname}' WHERE menu_id = {menuid}\".format(newname = newname, menuid = menu[0]['menu_id'])\n\n    cur.execute(sql)\n    conn.commit()\n    conn.close()\n\n    return redirect(\"/login/user/seller/store\")\n\n# Delete a menu item\n@app.route(\"/login/user/seller/store/menudel\", methods=['GET', 'POST'])\ndef menudel():\n    sid = request.form.get('sid')\n    menu = request.form.get('menu')\n    \"\"\"\n    Deletes a menu item of the store (items that belong to an open order cannot be deleted).\n    \"\"\"\n    conn = pymysql.connect(**db_connector)\n    cur = conn.cursor(pymysql.cursors.DictCursor)\n    allDeliveryDone = 1\n    sql = \"SELECT * FROM orderdetail WHERE menu_id = {menuid}\".format(menuid = menu)\n    cur.execute(sql)\n    orderdetail = cur.fetchall()\n    orders = []\n    for i in orderdetail:\n        sql = \"SELECT * FROM orders WHERE order_id = {orderid}\".format(orderid = i['order_id'])\n        cur.execute(sql)\n        order = cur.fetchone()\n        orders.append(order)\n\n    for i in orders:\n        if i['delivery_done'] == 0:\n            allDeliveryDone = 0\n    \n    if not orderdetail or allDeliveryDone == 1:\n        sql = \"DELETE FROM menu WHERE menu_id = {menuid}\".format(menuid = menu)\n        cur.execute(sql)\n        conn.commit()\n    \n    conn.close()\n    # First check whether any order or orderdetail rows reference this menu item,\n    # and block the deletion if they do\n\n    return redirect(\"/login/user/seller/store\")\n\n# Add a menu item\n@app.route(\"/login/user/seller/store/menuadd\", methods=['GET', 'POST'])\ndef menuadd():\n    \"\"\"\n    Adds a newly entered menu item (name, price, discount rate) to the store.\n    \"\"\"\n    newMenuName = request.form.get('newmenuname')\n    newMenuPrice = request.form.get('newmenuprice')\n    newMenuEvent = request.form.get('newmenuevent')\n    sid = request.form.get('sid')\n\n    conn = pymysql.connect(**db_connector)\n    cur = conn.cursor(pymysql.cursors.DictCursor)\n    sql = \"INSERT INTO menu (name,price,event,store_id) VALUES ('{menuName}',{price},{event},{storeid})\".format(menuName = newMenuName,price = newMenuPrice,event = newMenuEvent,storeid = sid)\n\n    cur.execute(sql)\n    conn.commit()\n    conn.close()\n\n    return redirect(\"/login/user/seller/store\")\n\n# Assign a delivery rider\n@app.route(\"/login/user/seller/store/ordercheck\", methods=['GET', 'POST'])\ndef ordercheck():\n    orderinfo = request.form.get('orderinfo')\n    \"\"\"\n    Assign a delivery rider.\n    Lists up to five riders who can deliver the current order, sorted by remaining capacity (stock).\n    \n    A rider can take the delivery when:\n    1. the delivery area matches the customer's address -> area = area\n    2. the rider is currently available -> available != 0\n    3. the rider has remaining capacity -> stock != 0\n    \"\"\"\n    conn = pymysql.connect(**db_connector)\n    cur = conn.cursor(pymysql.cursors.DictCursor)\n    sql = \"SELECT * FROM orders WHERE order_id = {orderid}\".format(orderid = orderinfo)\n    cur.execute(sql)\n    order = cur.fetchone()\n\n    sql = \"SELECT * FROM customers WHERE customer_id = {cusid}\".format(cusid = order['customer_id'])\n    cur.execute(sql)\n    customer = cur.fetchone()\n\n    sql = \"SELECT * FROM delivery WHERE area = '{address}' AND available = 1 AND NOT stock = 0 ORDER BY stock DESC LIMIT 5\".format(address = customer['address'])\n    cur.execute(sql)\n    delivery = cur.fetchall()\n\n    conn.close()\n\n    return render_template(\"ordercheck.html\", info=userinfo, view=delivery, orderinfo=orderinfo)\n\n# Assign the rider ID to the current order\n@app.route(\"/login/user/seller/store/ordercheck/real\", methods=['GET', 'POST'])\ndef orderreal():\n    \"\"\"\n    Assigns the chosen rider's ID to the current order.\n    \"\"\"\n    did = request.form.get('did')\n    order_id = request.form.get('orderinfo')\n\n    conn = pymysql.connect(**db_connector)\n    cur = conn.cursor(pymysql.cursors.DictCursor)\n\n    sql = \"UPDATE orders SET del_id = {delid} WHERE order_id = {orderid}\".format(delid = did,orderid = order_id)\n\n    cur.execute(sql)\n    conn.commit()\n    conn.close()\n\n    return redirect(\"/login/user/seller/store\")\n    \n# Cancel an order\n@app.route(\"/login/user/seller/store/orderdel\", methods=['GET', 'POST'])\ndef orderdel():\n    \"\"\"\n    Cancels the current order.\n    \"\"\"\n    order_id = request.form.get('orderinfo')\n\n    conn = pymysql.connect(**db_connector)\n    cur = conn.cursor(pymysql.cursors.DictCursor)\n\n    sql = \"DELETE FROM orders WHERE order_id = {orderid}\".format(orderid = order_id)\n    cur.execute(sql)\n    conn.commit()\n\n    conn.close()\n    \n    return redirect(\"/login/user/seller/store\")\n\n# ========== Delivery rider ==========\n\n# Orders currently being delivered - incomplete /////////////////////////////////////////////////////////\n@app.route(\"/login/user/delivery\", methods=['GET', 'POST'])\ndef delivery():\n    \"\"\"\n    Page of the orders the logged-in rider is currently delivering.\n    Shows the store name, customer name and phone, delivery address, order time and delivery status.\n    \"\"\"\n    template_order = []\n    snames = []\n    customers = []\n\n\n    conn = pymysql.connect(**db_connector)\n    cur = conn.cursor(pymysql.cursors.DictCursor)\n    sql = \"SELECT * FROM orders WHERE del_id = {delid} AND delivery_done = 0\".format(delid = userid['id'])\n    cur.execute(sql)\n    orders = cur.fetchall()\n    for_counter = len(orders)\n    for i in orders:\n        sql = \"SELECT sname FROM stores WHERE store_id = {sid}\".format(sid = i['store_id'])\n        cur.execute(sql)\n        sname = cur.fetchone()\n        snames.append(sname)\n        sql = \"SELECT * FROM customers WHERE customer_id = {cusid}\".format(cusid = i['customer_id'])\n        cur.execute(sql)\n        customer = cur.fetchone()\n        customers.append(customer)\n    \n    sql = \"SELECT * FROM delivery WHERE del_id = {delid}\".format(delid = userid['id'])\n    cur.execute(sql)\n    deli = cur.fetchone()\n    conn.close()\n\n    return render_template(\"delivery.html\", info=userinfo, order=orders, deli=deli,sname=snames,customers=customers,f = for_counter)\n\n# Mark delivery complete - untested ////////////////////////////////////////////////////\n@app.route(\"/login/user/delivery/deliverydone\", methods = ['GET', 'POST'])\ndef deliverydone():\n    \"\"\"\n    Marks the order as delivered when the rider completes the delivery.\n    \"\"\"\n    done = request.form.get('done')\n    orderid = request.form.get('orderid')\n\n    conn = pymysql.connect(**db_connector)\n    cur = conn.cursor(pymysql.cursors.DictCursor)\n\n    sql = \"UPDATE orders SET delivery_done = {done} WHERE order_id = {orderid}\".format(done = 1,orderid = orderid)\n\n    cur.execute(sql)\n    conn.commit()\n    conn.close()\n\n    return redirect(\"/login/user/delivery\")\n\nif __name__ == '__main__':\n    app.run(debug = True)\n" }, { "alpha_fraction": 0.6836262941360474, "alphanum_fraction": 0.7236354947090149, "avg_line_length": 35.74380111694336, "blob_id": "d2d5bd8a1f2b07998f5722a3428a117eef95f189", "content_id": "da872d26262642ebe253bd52b4e11e75d29dfbb4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "SQL", "length_bytes": 4420, "license_type": "no_license", "max_line_length": 155, "num_lines": 121, "path": "/making_databse.sql", "repo_name": "qkrwjdan/hanyangProject", "src_encoding": "UTF-8", "text": "show databases;\ndrop database pj;\ncreate database pj;\nuse pj;\n\n\nCREATE TABLE customers (\n  name varchar(50) NOT NULL,\n  passwd varchar(20) NOT NULL,\n  phone varchar(20) NOT NULL,\n  email varchar(50) NOT NULL,\n  address varchar(100) NOT NULL,\n  customer_id int(11) NOT NULL,\n  PRIMARY KEY (customer_id)\n);\n\nLOAD DATA INFILE '/Users/macbookair/Desktop/data_copy/customer.csv' INTO table customers FIELDS TERMINATED BY ',' LINES TERMINATED BY '\\r\\n';\n\nCREATE TABLE sellers (\n  sellers_id int(11) NOT NULL,\n  name varchar(50) NOT NULL,\n  phone varchar(20) NOT NULL,\n  email varchar(50) NOT NULL,\n  passwd varchar(50) NOT NULL,\n  PRIMARY KEY (sellers_id)\n);\n\nINSERT INTO sellers (sellers_id,name,phone,email,passwd) VALUES (0,'고태형','01020888268','[email protected]','sbs6y79');\nLOAD DATA INFILE '/Users/macbookair/Desktop/data_copy/seller.csv' INTO table sellers FIELDS TERMINATED BY ',' LINES TERMINATED BY '\\r\\n' IGNORE 1 LINES ;\n\n\nCREATE TABLE delivery (\n  del_id int(11) NOT NULL,\n  name varchar(20) NOT NULL,\n  email varchar(50) NOT NULL,\n  passwd varchar(50) NOT NULL,\n  area varchar(50) NOT NULL,\n  phone varchar(20) NOT NULL,\n  available tinyint(1) NOT NULL,\n  stock int(11) NOT NULL,\n  PRIMARY KEY (del_id)\n);\n\nINSERT INTO delivery VALUES(0,'정예란','[email protected]','4196dqr','서울특별시 서초구 내곡동','01083726706',1,6);\nLOAD DATA INFILE '/Users/macbookair/Desktop/data_copy/delivery.csv' INTO table delivery FIELDS TERMINATED BY ',' LINES TERMINATED BY '\\r\\n' IGNORE 1 LINES;\n\nCREATE TABLE stores (\n  store_id int(11) NOT NULL,\n  address varchar(50) NOT NULL,\n  sname varchar(50) NOT NULL,\n  phone varchar(20) NOT NULL,\n  seller_id int(11) NOT NULL,\n  open_time time NOT NULL,\n  close_time time NOT NULL,\n  type varchar(20) NOT NULL,\n  PRIMARY KEY (store_id),\n  KEY seller_id (seller_id),\n  CONSTRAINT stores_ibfk_1 FOREIGN KEY (seller_id) REFERENCES sellers (sellers_id)\n);\n\nINSERT INTO stores VALUES(28141,'서울특별시 강남구 논현동','일호감자탕(분점)','0283720865',35765,'10:00','19:00','패스트푸드');\nLOAD DATA INFILE '/Users/macbookair/Desktop/data_copy/store.csv' INTO table stores FIELDS TERMINATED BY ',' LINES TERMINATED BY '\\r\\n' IGNORE 1 LINES;\n\n\nCREATE TABLE payment (\n  payment_id int(11) NOT NULL,\n  customer_id int(11) NOT NULL,\n  pay_num bigint(20) NOT NULL,\n  pay_type tinyint(4) NOT NULL,\n  PRIMARY KEY (payment_id),\n  KEY customer_id (customer_id),\n  CONSTRAINT payment_ibfk_1 FOREIGN KEY 
(customer_id) REFERENCES customers (customer_id)\n);\n\nLOAD DATA INFILE '/Users/macbookair/Desktop/data_copy/pay.csv' INTO table payment FIELDS TERMINATED BY ',' LINES TERMINATED BY '\\r\\n';\n\nCREATE TABLE menu (\n menu_id int(11) NOT NULL AUTO_INCREMENT,\n name varchar(50) COLLATE utf8mb4_general_ci NOT NULL,\n price int(11) NOT NULL,\n event float NOT NULL,\n store_id int(11) NOT NULL,\n PRIMARY KEY (menu_id),\n KEY store_id (store_id),\n CONSTRAINT menu_ibfk_1 FOREIGN KEY (store_id) REFERENCES stores (store_id)\n);\n\nINSERT INTO menu VALUES(1,'기름사탕유부단디',5594,0.75997,37022);\nLOAD DATA INFILE '/Users/macbookair/Desktop/data_copy/menu.csv' INTO table menu FIELDS TERMINATED BY ',' LINES TERMINATED BY '\\r\\n' IGNORE 1 LINES;\n\n\nCREATE TABLE orders (\n order_id int(11) NOT NULL AUTO_INCREMENT,\n order_time timestamp NOT NULL,\n delivery_done tinyint(4) NOT NULL,\n del_id int(11) DEFAULT NULL,\n customer_id int(11) NOT NULL,\n store_id int(11) NOT NULL,\n payment_id int(11) DEFAULT NULL,\n PRIMARY KEY (order_id),\n KEY del_id (del_id),\n KEY customer_id (customer_id),\n KEY store_id (store_id),\n KEY payment_id (payment_id),\n CONSTRAINT orders_ibfk_1 FOREIGN KEY (del_id) REFERENCES delivery (del_id),\n CONSTRAINT orders_ibfk_2 FOREIGN KEY (customer_id) REFERENCES customers (customer_id),\n CONSTRAINT orders_ibfk_3 FOREIGN KEY (store_id) REFERENCES stores (store_id),\n CONSTRAINT orders_ibfk_4 FOREIGN KEY (payment_id) REFERENCES payment (payment_id)\n);\n\nCREATE TABLE orderdetail (\n detail_order_id int(11) NOT NULL AUTO_INCREMENT,\n order_id int(11) NOT NULL,\n menu_id int(11) DEFAULT NULL,\n quantity int(11) NOT NULL,\n PRIMARY KEY (detail_order_id),\n KEY order_id (order_id),\n KEY menu_id (menu_id),\n CONSTRAINT orderdetail_ibfk_1 FOREIGN KEY (order_id) REFERENCES orders (order_id) ON DELETE CASCADE ON UPDATE CASCADE,\n CONSTRAINT orderdetail_ibfk_2 FOREIGN KEY (menu_id) REFERENCES menu (menu_id) ON DELETE SET NULL ON UPDATE SET NULL\n);" } ]
5
amit1995sharma/Basic-Web-Container
https://github.com/amit1995sharma/Basic-Web-Container
8423f982b219f837b98aabeeb147b6345c9e0006
8a28f5af4590f04fa3e8f69f6574d7d5b29eb0d7
336766cb9f2ef6c1be3b65d5b6500cdcac8cc777
refs/heads/master
2021-05-09T13:51:44.697986
2018-01-26T15:40:50
2018-01-26T15:40:50
119,046,560
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6285714507102966, "alphanum_fraction": 0.6285714507102966, "avg_line_length": 51.5, "blob_id": "12bd6a3fa05db23a15c92b9a9950601745f74610", "content_id": "3733ebe9ef6acd78489aa13f515070896cfdb75a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 105, "license_type": "no_license", "max_line_length": 54, "num_lines": 2, "path": "/urls.py", "repo_name": "amit1995sharma/Basic-Web-Container", "src_encoding": "UTF-8", "text": "urlpatterns = [\"r'^$',controller.index, 'index.html' \"\n , \"r'^hello$',controller.hello, 'hello.html\"]\n" }, { "alpha_fraction": 0.6228813529014587, "alphanum_fraction": 0.6228813529014587, "avg_line_length": 32.71428680419922, "blob_id": "2e1513426d5005a1e87ed87bf0c00c67318c4563", "content_id": "3b5098f993a07d9dffccaad962a2e8e0d715258e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 236, "license_type": "no_license", "max_line_length": 150, "num_lines": 7, "path": "/controller.py", "repo_name": "amit1995sharma/Basic-Web-Container", "src_encoding": "UTF-8", "text": "def index():\n return {'user': 'David'}\n\n\ndef hello():\n return {'Name': 'Amit',\n 'Assment': 'Data Need to be printed. If You want to add more file edit urls.py , controller.py and create html for that file <br> thanks'}\n" }, { "alpha_fraction": 0.7469387650489807, "alphanum_fraction": 0.7877551317214966, "avg_line_length": 17.11111068725586, "blob_id": "0e62bf7dab97acb4426732789e1378091a7eb0d4", "content_id": "bd94251ace2d006bfc8e781769355dcf7f4e776b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 490, "license_type": "no_license", "max_line_length": 56, "num_lines": 27, "path": "/README.md", "repo_name": "amit1995sharma/Basic-Web-Container", "src_encoding": "UTF-8", "text": "# Basic-Web-Container\nMini Demo framework for python\n\nrunning command :\npython run.py preview 0:8080 \n\n\nuser control+c to terminate the server\n\nLibrary Required :\n1)http.server\n2)sys\n3)os\n4)re\n\nurl.py contains the url path and controller funtion\nController contains the json format for the html request\n\nrun.py start the server and control the request\n\nintex.html\nuse %(user)s to add data dynamically\n\nlocalhost:8080\nlocalhost:8080/hello\n\nIf page not found the brower will throw 404 error\n\n" }, { "alpha_fraction": 0.5503565073013306, "alphanum_fraction": 0.5619429349899292, "avg_line_length": 31.521739959716797, "blob_id": "5f7fb520d9e83d08c866f93ab785e3c81e9a1e46", "content_id": "4d38419912f888c44c729cda5fd5b8a2331f3dac", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2244, "license_type": "no_license", "max_line_length": 77, "num_lines": 69, "path": "/run.py", "repo_name": "amit1995sharma/Basic-Web-Container", "src_encoding": "UTF-8", "text": "import sys\nfrom http.server import BaseHTTPRequestHandler, HTTPServer\nfrom urls import urlpatterns\nimport controller\nimport os\nimport re\nimport glob\n\n\nclass requesthandlerclass(BaseHTTPRequestHandler):\n def _set_headers(self):\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n\n def printpage(self, filename, fun_pattern):\n try:\n f_encripted = open(filename, \"r\").read()\n # print(f_encripted)\n # creating a funtion\n method_to_call = getattr(controller, fun_pattern)\n f_decripted = f_encripted % method_to_call()\n self._set_headers()\n 
self.wfile.write(f_decripted.encode())\n        except:\n            self.send_error(404, \"File not found \" + self.path)\n\n    def getPageData(self, urlpath):\n        for i in range(len(urlpatterns)):\n            pattern = urlpatterns[i].split(',')[0].split('\\'')[1]\n            if re.match(pattern, urlpath, re.I):\n                htmlfile = urlpatterns[i].split(',')[2].split('\\'')[1]\n                fun_pattern = urlpatterns[i].split(',')[1].split('.')[-1]\n                if htmlfile in os.listdir():\n                    self.printpage(htmlfile, fun_pattern)\n                else:\n                    self.send_error(404, \"File not found \" + self.path)\n                return\n        self.send_error(404, \"File not found \" + self.path)\n\n    def do_GET(self):\n        self.getPageData(self.path.rsplit(\"/\")[-1])\n\n\ndef run(portId):\n    server_address = ('localhost', portId)\n    server = HTTPServer(server_address, requesthandlerclass)\n    try:\n        server.serve_forever()\n    except KeyboardInterrupt:\n        print(\"you interrupted the code\")\n    finally:\n        server.server_close()\n\n\nif __name__ == \"__main__\":\n    if len(sys.argv) == 3 and sys.argv[1] == 'preview':\n        portid = int(sys.argv[2].split(':')[-1])\n        print(\"control+c to quit\")\n        run(portid)\n    else:\n        print(\"Wrong input. Try again\")\n        exit(0)\n" } ]
4
bayuarifbudiman/flaskobjectdetection
https://github.com/bayuarifbudiman/flaskobjectdetection
1effa06377f3a6b0e7d7636b0f68d1d7cf9ef730
883e460265f1e78d74c7da1e4dca8a2ee9441b33
4f338029bb1680f0efe773baa51325b101e879a9
refs/heads/master
2022-12-19T09:13:03.805301
2020-02-09T07:57:24
2020-02-09T07:57:24
296,239,844
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5920439958572388, "alphanum_fraction": 0.6064324975013733, "avg_line_length": 32.75714111328125, "blob_id": "d6eb8e09e3c937e68abe7c1b570572332ac3857c", "content_id": "908dd5a305ed328ff74698d00c20c142a4a68cdb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2363, "license_type": "no_license", "max_line_length": 90, "num_lines": 70, "path": "/server.py", "repo_name": "bayuarifbudiman/flaskobjectdetection", "src_encoding": "UTF-8", "text": "from flask import Flask, render_template, url_for,Response\nfrom flask_socketio import SocketIO,emit\nfrom datetime import datetime\nimport cv2\nimport os\n\napp = Flask(__name__)\nsocketio = SocketIO(app)\n\nvideo = cv2.VideoCapture(0)\ncar_cascade = cv2.CascadeClassifier('cascade 20.xml')\nimage_name = []\n\[email protected]('/')\[email protected]('/streaming')\ndef streaming():\n return render_template('streaming.html',title = 'streaming')\n\ndef gen():\n \"\"\"Video streaming generator function.\"\"\"\n while True:\n rval, frame = video.read()\n\n #detect cars in the video\n cars3 = car_cascade.detectMultiScale(frame, 1.3,8)\n take_photo = False\n for (x,y,w,h) in cars3:\n cv2.rectangle(frame,(x,y),(x+w,y+h),(0,255,0),2)\n CoordXCentroid = int((x+x+w)/2)\n CoordYCentroid = int((y+y+h)/2)\n ObjectCentroid = (CoordXCentroid,CoordYCentroid)\n cv2.circle(frame, ObjectCentroid, 5, (0,255,0), 5) \n\n global image_name \n entries = os.listdir('D:/Latian/python/flask_imageprocessing/static/gallery/')\n if not image_name == entries:\n image_name = entries\n \n\n '''day = datetime.today()\n current_day=(day.strftime('%d-%m-%Y'))\n now = datetime.now()\n current_time = now.strftime(\"%H-%M-%S\")\n path = 'D:/Latian/python/flask_imageprocessing/static/gallery/'\n\n if not cv2.imwrite(path + current_day +\"_\"+ current_time + \".jpg\", frame):\n print(\"Could not write image\")'''\n cv2.imwrite('t.jpg', frame)\n \n yield (b'--frame\\r\\n'\n b'Content-Type: image/jpeg\\r\\n\\r\\n' + open('t.jpg', 'rb').read() + b'\\r\\n')\n\[email protected]('/video_feed')\ndef video_feed():\n \"\"\"Video streaming route. Put this in the src attribute of an img tag.\"\"\"\n return Response(gen(),mimetype='multipart/x-mixed-replace; boundary=frame')\n\n\[email protected]('/image')\ndef image():\n '''while True:\n image_name = []\n entries = os.listdir('D:/Latian/python/flask_imageprocessing/static/gallery/')\n if not image_name == entries:\n image_name = entries\n return render_template('image.html',title='image',images=image_name)'''\n return render_template('image.html',title='image',images=image_name)\n\nif __name__ == '__main__':\n app.run(host='0.0.0.0', debug=True, threaded=True)\n" } ]
1
reg-11/122-Project
https://github.com/reg-11/122-Project
8955400b22a26c76fe4a0810c75535fef40d9748
68a654f43099ab1f6a495ccf76de8813e2d95985
ad21693877574538236018fa20cec35242e55866
refs/heads/master
2023-05-14T06:58:54.292631
2021-06-09T08:12:07
2021-06-09T08:12:07
369,425,628
0
1
null
null
null
null
null
[ { "alpha_fraction": 0.7125325202941895, "alphanum_fraction": 0.7125325202941895, "avg_line_length": 39.75757598876953, "blob_id": "db46e478fe42142aeec1187d42c21332412ca8c3", "content_id": "80329a573d505e8679a32b98953f5c12ab7eda98", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2689, "license_type": "no_license", "max_line_length": 98, "num_lines": 66, "path": "/esko_app/urls.py", "repo_name": "reg-11/122-Project", "src_encoding": "UTF-8", "text": "from django.urls import path, include\n\nfrom .views import (PostListView, PostDetailView,PostCreateView, PostUpdateView, \n\tPostDeleteView, AddCommentView, HomeListView, PasswordsChangeView, \n\tTagIndexView, ReportView, ReportDeleteView, ReportPostDeleteView)\n\nfrom . import views\n\nfrom django.contrib.auth import views as auth_views\n\nfrom django.conf import settings\nfrom django.conf.urls.static import static\n\napp_name = 'esko_app'\nurlpatterns = [\n\tpath('', views.index, name='index'),\n\tpath('signup/', views.signup, name='signup'),\n\tpath('login/', views.loginPage, name='login'),\n\n\tpath('home/', views.home, name='home'),\n\tpath('profile/', views.profile, name='profile'),\n\n\tpath('category/', views.category, name='category'),\n\tpath('search_tags/', views.search_tags, name='search_tags'),\n\t# path('sell/', views.sell, name='sell'),\n\n\tpath('profileOther/<username>/', views.profileOther, name='profile-other'),\n\tpath('profileOtherComment/<username>/', views.profileOtherComment, name='profile-other-comment'),\n\n\tpath('postCategory/', views.PostByCategory, name='post_by_category'),\n\tpath('searchTag/', views.SearchByTag, name='search_by_tags'),\n\n\tpath('reported-posts/', views.reportedPosts, name='reported-posts'),\n\tpath('post/<int:pk>/report', ReportView.as_view(), name='report-post'),\n\tpath('report/<int:pk>/delete/', ReportDeleteView.as_view(), name='report-delete'),\n\tpath('rpost/<int:pk>/delete/', ReportPostDeleteView.as_view(), name='rpost-delete'),\n\n\n\t# path('home/', HomeListView.as_view(), name='home'),\n\t# path('profile/', PostListView.as_view(), name='profile'),\n\tpath('about/', views.about, name='about'),\n\tpath('logout/', views.logoutUser, name='logout'),\n\n\tpath('password-reset/', \n\t\tPasswordsChangeView.as_view(template_name='esko_app/password_reset.html'),\n\t\tname= 'password_reset'),\t\n\tpath('password-reset/done/', views.password_success, name= 'password_success'),\n\t\n\t\n\tpath('post/<int:pk>/', PostDetailView.as_view(), name='post-detail'),\n\tpath('post/<int:pk>/update/', PostUpdateView.as_view(), name='post-update'),\n\tpath('post/new/', PostCreateView.as_view(), name='post-create'),\n\tpath('post/<int:pk>/delete/', PostDeleteView.as_view(), name='post-delete'),\n\tpath('profileSettings/', views.profileSettings, name='profileSettings'),\n\tpath('like/<int:pk>/',views.LikeView, name='like_post'),\n\t# path('likeHome/',views.LikeViewHome, name='like_post_home'),\n\tpath('post/<int:pk>/comment', AddCommentView.as_view(), name='add_comment'),\n\n\tpath('tags/<slug:tag_slug>/', TagIndexView.as_view(), name='posts_by_tags'),\n\t\n\t# path('post/new/', views.post, name='post-create'),\n\n]\n\nif settings.DEBUG:\n\turlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)" }, { "alpha_fraction": 0.5473340749740601, "alphanum_fraction": 0.5745375156402588, "avg_line_length": 30.689655303955078, "blob_id": "6318def8def045815d082577adc0cb21e5f18a1d", "content_id": "33f85a242eb515cb7e5f82175a4bd9fd0ed6b9c6", 
"detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 919, "license_type": "no_license", "max_line_length": 162, "num_lines": 29, "path": "/esko_app/migrations/0027_auto_20210530_1506.py", "repo_name": "reg-11/122-Project", "src_encoding": "UTF-8", "text": "# Generated by Django 3.2.3 on 2021-05-30 07:06\n\nfrom django.db import migrations, models\nimport taggit.managers\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('taggit', '0003_taggeditem_add_unique_index'),\n ('esko_app', '0026_delete_report'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='post',\n name='category',\n field=models.CharField(choices=[('sell', 'sell'), ('find', 'find'), ('services/rent', 'services/rent'), ('swap', 'swap')], max_length=13),\n ),\n migrations.RemoveField(\n model_name='post',\n name='tags',\n ),\n migrations.AddField(\n model_name='post',\n name='tags',\n field=taggit.managers.TaggableManager(help_text='A comma-separated list of tags.', through='taggit.TaggedItem', to='taggit.Tag', verbose_name='Tags'),\n ),\n ]\n" }, { "alpha_fraction": 0.8022388219833374, "alphanum_fraction": 0.8022388219833374, "avg_line_length": 28.55555534362793, "blob_id": "7e5fe9741410701cabd354d42745ea12f681ccc9", "content_id": "f6722f21d382ad6aede8e5564823c8e925257d81", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 268, "license_type": "no_license", "max_line_length": 60, "num_lines": 9, "path": "/esko_app/admin.py", "repo_name": "reg-11/122-Project", "src_encoding": "UTF-8", "text": "from django.contrib import admin\nfrom .models import Profile, Post, Category, Comment, Report\n\n# Register your models here.\nadmin.site.register(Profile)\nadmin.site.register(Post)\nadmin.site.register(Comment)\nadmin.site.register(Category)\nadmin.site.register(Report)\n\n\n" }, { "alpha_fraction": 0.5316692590713501, "alphanum_fraction": 0.5634945631027222, "avg_line_length": 32.05154800415039, "blob_id": "a463a28bffb105d63a79fb6f8915dd84c2cba6e0", "content_id": "032e4f191ae88312a31176af8695843b63978fcf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "HTML", "length_bytes": 3205, "license_type": "no_license", "max_line_length": 202, "num_lines": 97, "path": "/templates/esko_app/signup.html", "repo_name": "reg-11/122-Project", "src_encoding": "UTF-8", "text": "{% load socialaccount %}\n\n<!DOCTYPE html>\n<html>\n<head>\n\t<title>Esko</title>\n\n\t<link href=\"https://cdn.jsdelivr.net/npm/[email protected]/dist/css/bootstrap.min.css\" rel=\"stylesheet\" integrity=\"sha384-+0n0xVW2eSR5OomGNYDnhzAbDsOXxcvSN1TPprVMTNDbiYZCxYbOOl7+AMvyTG2x\" crossorigin=\"anonymous\">\n\n\t\n\t<style type=\"text/css\">\n\t\t.form-control:focus {\n border-color: #DBCA46;\n box-shadow: 0px 1px 1px rgba(0, 0, 0, 0.075) inset, 0px 0px 8px rgba(255, 100, 255, 0.5);\n }\n\t</style>\n\n</head>\n<body>\n\n\t<div class=\"row g-0\">\n\t\t{% if messages %}\n\t\t\t\t\t{% for message in messages %}\n\t\t\t\t\t\t<div class=\"alert alert-success\">\n\t\t\t\t\t\t\t{{ message }}\n\t\t\t\t\t\t</div>\n\n\t\t\t\t\t{% endfor %}\n\n\t\t\t\t{% endif %}\n\n\t\t<div class=\"col-md-5 g-0\">\n\t\t\t<div class=\"leftside d-flex justify-content-center align-items-center\" style=\"height: 100vh; width: 100%;\">\t\t\t\t\n\n\t\t\t\t<div class=\"col offset-4\">\t\n\n\t\t\t\t\t<div class=\"container\" style=\"background-color:#F5F5F5; padding: 25px; width: 90%\">\n\t\t\t\t\t\t<h1 class=\"font-weight-bold\" 
style=\"align-content: right;font-size: 60px; color:#DBCA46; font-weight: bold;\">Sign Up</h1> \n\n\t\t\t\t\t\t\n\n\t\t\t\t\t\t<form method=\"POST\">\n\t\t\t\t\t\t\t{% csrf_token %}\n\t\t\t\n\t\t\t\t\t\t <div class=\"form-group\" id=\"fg\" style=\"margin-top: 20px;\">\n\t\t\t\t\t\t <!-- <input type=\"email\" class=\"form-control\" id=\"email\" placeholder=\"Enter UP-email\" style=\"margin-top: 5px;\" > -->\n\t\t\t\t\t\t {{ form.username }}\n\t\t\t\t\t\t </div>\n\n\n\t\t\t\t\t\t <div class=\"form-group\" id=\"fg\" style=\"margin-top: 5px;\">\n\t\t\t\t\t\t <!-- <input type=\"email\" class=\"form-control\" id=\"email\" placeholder=\"Enter UP-email\" style=\"margin-top: 5px;\" > -->\n\t\t\t\t\t\t {{ form.email }}\n\t\t\t\t\t\t </div>\n\t\t\t\t\t\t <div class=\"form-group\" style=\"margin-top: 5px;\">\n\t\t\t\t\t\t <!-- <input type=\"password\" class=\"form-control\" id=\"password\" placeholder=\"Password\" style=\"margin-top: 5px\"> -->\n\t\t\t\t\t\t {{ form.password1 }}\n\t\t\t\t\t\t \n\t\t\t\t\t\t </div>\n\t\t\t\t\t\t <div class=\"form-group\" style=\"margin-top: 5px;\">\n\t\t\t\t\t\t <!-- <input type=\"confirm_password\" class=\"form-control\" id=\"confirm_password\" placeholder=\"Confirm password\" style=\"margin-top: 5px\"> -->\n\t\t\t\t\t\t {{ form.password2 }}\n\t\t\t\t\t\t </div>\n\t\t\t\t\t\t \n\t\t\t\t\t\t <button type=\"submit\" class=\"btn btn-warning btn-lg\" style=\"font-size: 15px; color: white ; width:100%; margin-top: 30px; margin-left: 5px\">Sign Up</button>\n\t\t\t\t\t\t\n\t\t\t\t\t\t <!-- <a class=\"btn btn-danger btn-lg\" href=\"{% provider_login_url 'google' %}\" style=\"font-size: 15px; color: white ; width:100%; margin-top: 7px; margin-left: 5px\"> Sign Up with Google</a> -->\n\t\t\t\t\t\t \n\t\t\t\t\t\t</form>\n\t\t\t\t\t\t<hr style=\"width:100%;text-align:left;margin-left:0\">\n\n\t\t\t\t\t\t{% if form.errors %}\n\t\t\t\t\t\t {% for field in form %}\n\t\t\t\t\t\t {% for error in field.errors %}\n\t\t\t\t\t\t <p> {{ error }} </p>\n\t\t\t\t\t\t {% endfor %}\n\t\t\t\t\t\t {% endfor %}\n\t\t\t\t\t\t{% endif %}\n\n\t\t\t\t\t\t<h6 style=\"margin-top: 7px\">Already have an account? 
<a href=\"/esko_app/login/\" style=\"margin-left: 50px; color:#DBCA46;\" >Login</a></h6>\n\t\t\t\t\t</div>\n\t\t\t\t</div>\t\t\t\t\n\t\t\t</div>\n\t\t\t\n\t\t</div>\n\n\t\t<div class=\"col-md-6 g-0\">\n\t\t\t<div class=\"rightside d-flex justify-content-center align-items-center\" style=\"height: 100vh; width: 100%;\" >\n\t\t\t\t<img src=\"/static/landing_image.jpg\" style=\"width: 100%;height: 80%\" >\t\t\t\t\n\t\t\t</div>\n\t\t</div>\t\t\n\t</div>\n\n\n\n</body>\n</html>" }, { "alpha_fraction": 0.6535724401473999, "alphanum_fraction": 0.65727299451828, "avg_line_length": 29.504348754882812, "blob_id": "6fcdd439d93931babb9b6cf1e1e813a832a1a861", "content_id": "03862c7d2ef01974af78972340fb20ddfa233a60", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3513, "license_type": "no_license", "max_line_length": 148, "num_lines": 115, "path": "/esko_app/forms.py", "repo_name": "reg-11/122-Project", "src_encoding": "UTF-8", "text": "from django.forms import ModelForm\nfrom django.contrib.auth.forms import UserCreationForm\nfrom django.contrib.auth.models import User\nfrom django import forms\n\nfrom .models import Profile, Post, Comment, Report\n\n# from taggit.forms import TagField\n\nCATEGORIES = [\n\t('sell', 'sell'),\n\t('find', 'find'),\n\t('services/rent', 'services/rent'),\n\t('swap', 'swap'),\n]\n\nPROBLEMS = [\n \t\t('hate speech', 'hate speech'),\n \t\t('violence', 'violence'),\n \t\t('harassment', 'harassment'),\n \t\t('nudity', 'nudity'),\n \t\t('false information', 'false information'),\n \t\t('spam', 'spam'),\n \t\t('others', 'others')\n \t]\n\nclass CreateUserForm(UserCreationForm):\n\tpassword1 = forms.CharField(widget=forms.PasswordInput(attrs={'class': 'form-control','placeholder':'Password'}))\n\tpassword2 = forms.CharField(widget=forms.PasswordInput(attrs={'class': 'form-control','placeholder':'Confirm Password'}))\n\t\n\tclass Meta:\n\t\tmodel = User\n\t\tfields = ['username', 'email', 'password1', 'password2']\n\n\t\twidgets = {\n\t\t\t'username': forms.TextInput(attrs={'class': 'form-control','placeholder':'Username'}),\n\t\t\t'email': forms.TextInput(attrs={'class': 'form-control','placeholder':'Enter UP email'}),\t\t\n\t\t}\n\n\tdef clean_email(self, *args, **kwargs):\n\t\temail = self.cleaned_data.get(\"email\")\n\t\tif not email.endswith(\"@up.edu.ph\"):\n\t\t\traise forms.ValidationError(\"This is not a valid email\")\n\t\treturn email\n\nclass UserUpdateForm(forms.ModelForm):\n\t# email = forms.TextInput(attrs={'class': 'form-control'})\n\n\tclass Meta:\n\t\tmodel = User\n\t\tfields = ['username', 'email']\n\n\t\twidgets = {\n\t\t\t'username': forms.TextInput(attrs={'class': 'form-control'}),\n\t\t\t# 'email': forms.TextInput(attrs={'class': 'form-control'}),\t\t\n\t\t}\n\n\n\nclass ProfileUpdateForm(forms.ModelForm):\n\tclass Meta:\n\t\tmodel = Profile\n\t\tfields = ['profile_pic', 'year_level','phone_number','sns_account','bio']\n\n\t\twidgets = {\n\t\t\t'profile_pic': forms.FileInput(attrs={'class': 'form-control', 'style': 'margin-left:15px; width:50%'}),\n\t\t\t'year_level': forms.TextInput(attrs={'class': 'form-control'}),\t\n\t\t\t'phone_number': forms.TextInput(attrs={'class': 'form-control'}),\t\n\t\t\t'sns_account': forms.TextInput(attrs={'class': 'form-control'}),\t\n\t\t\t'bio': forms.Textarea(attrs={'class': 'form-control'}),\t\t\n\t\t}\n\nclass CommentForm(forms.ModelForm):\n\tclass Meta:\n\t\tmodel = Comment\n\t\tfields = ['body']\n\n\t\twidgets = {\n\t\t\t# 'commenter': 
forms.TextInput(attrs={'class': 'form-control'}),\n\t\t\t'body': forms.Textarea(attrs={'rows':3, 'class': 'form-control', 'style': 'width:93%;margin-left:35px','placeholder':'Type your comment here'}),\t\n\t\t}\n\n\n\nclass CreatePostForm(forms.ModelForm):\n\n\tclass Meta:\n\t\tmodel = Post\n\t\tcategory = forms.ChoiceField(choices=CATEGORIES)\n\t\t# tags = TagField()\n\t\t\n\t\tfields = ['category', 'description', 'tags','post_image']\n\t\t# fields = ['category', 'description', 'tags']\n\n\t\twidgets = {\n\t\t\t'description': forms.Textarea(attrs={'class': 'form-control', 'rows':4}),\t\n\t\t\t'tags': forms.TextInput(attrs={'class': 'form-control'}),\n\t\t\t'post_image': forms.FileInput(attrs={'class': 'form-control','multiple': True}),\n\t\t}\n\nclass ReportForm(forms.ModelForm):\n\n\tclass Meta:\n\t\tmodel = Report\n\t\tproblem = forms.ChoiceField(choices=PROBLEMS)\n\t\t# tags = TagField()\n\t\t\n\t\tfields = ['problem', 'notes']\n\t\t# fields = ['category', 'description', 'tags']\n\n\t\twidgets = {\n\t\t\t'notes': forms.Textarea(attrs={'class': 'form-control', 'rows':2}),\t\n\t\t\t# 'tags': forms.TagField(attrs={'class': 'form-control'}),\n\t\t\t# 'post_image': forms.FileInput(attrs={'class': 'form-control','multiple': True}),\n\t\t}\n\n\n\n\t\n" }, { "alpha_fraction": 0.5621117949485779, "alphanum_fraction": 0.5869565010070801, "avg_line_length": 36.153846740722656, "blob_id": "9d6a3f37a48c90c5052008bcfd424a00449608b4", "content_id": "6643c6f321aec4ba42bf50e75c0c3ce2435b6be4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 966, "license_type": "no_license", "max_line_length": 117, "num_lines": 26, "path": "/esko_app/migrations/0002_profile.py", "repo_name": "reg-11/122-Project", "src_encoding": "UTF-8", "text": "# Generated by Django 3.2.3 on 2021-05-24 15:03\n\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('esko_app', '0001_initial'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Profile',\n fields=[\n ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('profile_picture', models.ImageField(default='default.jpg', upload_to='profile_pics')),\n ('year_level', models.CharField(blank=True, max_length=30)),\n ('phone_number', models.IntegerField(blank=True)),\n ('sns_account', models.URLField()),\n ('bio', models.CharField(blank=True, max_length=200)),\n ('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to='esko_app.user')),\n ],\n ),\n ]\n" }, { "alpha_fraction": 0.7121211886405945, "alphanum_fraction": 0.7121211886405945, "avg_line_length": 21, "blob_id": "dbe0cfd1c79df4f113a206096cd79f7fdb818608", "content_id": "dc1180ca92aeff6da8eae08e0d275fef709a7e98", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 198, "license_type": "no_license", "max_line_length": 56, "num_lines": 9, "path": "/esko_app/apps.py", "repo_name": "reg-11/122-Project", "src_encoding": "UTF-8", "text": "from django.apps import AppConfig\n\n\nclass EskoAppConfig(AppConfig):\n default_auto_field = 'django.db.models.BigAutoField'\n name = 'esko_app'\n\n def ready(self):\n \timport esko_app.signals\n" }, { "alpha_fraction": 0.5345155000686646, "alphanum_fraction": 0.5560481548309326, "avg_line_length": 38.474998474121094, "blob_id": "ba5c0773daed4f8ce1c2b3b05a42e9a7e6830c25", "content_id": 
"bdb007267012ef5f608eb2380459afc523cc49da", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1579, "license_type": "no_license", "max_line_length": 117, "num_lines": 40, "path": "/esko_app/migrations/0001_initial.py", "repo_name": "reg-11/122-Project", "src_encoding": "UTF-8", "text": "# Generated by Django 3.2.3 on 2021-05-20 16:27\n\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='User',\n fields=[\n ('user_id', models.AutoField(primary_key=True, serialize=False)),\n ('email', models.CharField(max_length=50, unique=True)),\n ('password', models.CharField(max_length=30, unique=True)),\n ('username', models.CharField(blank=True, max_length=30, unique=True)),\n ('year_level', models.CharField(blank=True, max_length=30)),\n ('phone_number', models.IntegerField(blank=True)),\n ('sns_account', models.URLField()),\n ('bio', models.CharField(blank=True, max_length=200)),\n ],\n ),\n migrations.CreateModel(\n name='Post',\n fields=[\n ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('category', models.CharField(max_length=30)),\n ('description', models.CharField(max_length=200)),\n ('tags', models.CharField(max_length=100)),\n ('image', models.ImageField(blank=True, null=True, upload_to='')),\n ('date', models.DateField()),\n ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='esko_app.user')),\n ],\n ),\n ]\n" }, { "alpha_fraction": 0.51408451795578, "alphanum_fraction": 0.5809859037399292, "avg_line_length": 16.75, "blob_id": "56af27c0af0d59e8b07b3e622b434f35f83c60aa", "content_id": "875314990cdb979a2ca581aae4ac0dab85053d2d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 284, "license_type": "no_license", "max_line_length": 47, "num_lines": 16, "path": "/esko_app/migrations/0026_delete_report.py", "repo_name": "reg-11/122-Project", "src_encoding": "UTF-8", "text": "# Generated by Django 3.2.3 on 2021-05-29 11:23\n\nfrom django.db import migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('esko_app', '0025_report'),\n ]\n\n operations = [\n migrations.DeleteModel(\n name='Report',\n ),\n ]\n" }, { "alpha_fraction": 0.6941580772399902, "alphanum_fraction": 0.6941580772399902, "avg_line_length": 22.200000762939453, "blob_id": "9ddbc77b8e261a54c69470ec1ce9648ac85a8b51", "content_id": "8669c1533ff3f2e8280cd951f68df288a5244691", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 582, "license_type": "no_license", "max_line_length": 149, "num_lines": 25, "path": "/esko_app/filters.py", "repo_name": "reg-11/122-Project", "src_encoding": "UTF-8", "text": "from django import forms\nimport django_filters\n\nfrom .models import Post, Category\n\nclass PostFilter(django_filters.FilterSet):\n\tsell = 'sell'\n\tservices = 'services/rent'\n\tswap = 'swap'\n\tfind = 'find'\n\n\tcategory_choices = [\n\t\t(sell, sell),\n\t\t(services, services),\n\t\t(swap, swap),\n\t\t(find, find),\n\t]\n\t\n\t#category = django_filters.ChoiceFilter(choices=Post.objects.all())\n\t#category = django_filters.ChoiceFilter(choices=category_choices, widget=forms.Select(attrs={'class':'form-select', 'style':'background:$primary'}))\n\tclass Meta:\n\t\tmodel = Post\n\t\tfields = 
[\n\t\t\t'category',\n\t\t]\n\t\t" }, { "alpha_fraction": 0.5028790831565857, "alphanum_fraction": 0.5451055765151978, "avg_line_length": 21.65217399597168, "blob_id": "b82845f19cb9fd6a379f7425cd2f94e4d3cc18fb", "content_id": "4d1d68fb03a24b7247a70f5222ea950099f54b90", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 521, "license_type": "no_license", "max_line_length": 67, "num_lines": 23, "path": "/esko_app/migrations/0031_auto_20210602_1720.py", "repo_name": "reg-11/122-Project", "src_encoding": "UTF-8", "text": "# Generated by Django 3.2.3 on 2021-06-02 09:20\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('esko_app', '0030_report'),\n ]\n\n operations = [\n migrations.RemoveField(\n model_name='post',\n name='tags',\n ),\n migrations.AddField(\n model_name='post',\n name='tags',\n field=models.CharField(default='tags', max_length=100),\n preserve_default=False,\n ),\n ]\n" }, { "alpha_fraction": 0.7063537240028381, "alphanum_fraction": 0.7081527709960938, "avg_line_length": 23.169960021972656, "blob_id": "6105be85bcc966163f2ce46074433fa81b4848df", "content_id": "eb724be388043712d84c2a41173491416ba46dd7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 12229, "license_type": "no_license", "max_line_length": 109, "num_lines": 506, "path": "/esko_app/views.py", "repo_name": "reg-11/122-Project", "src_encoding": "UTF-8", "text": "from django.shortcuts import render, redirect, get_object_or_404\nfrom django.contrib.auth.forms import UserCreationForm\nfrom django.contrib import messages\n\nfrom django.contrib.auth import authenticate, login, logout\nfrom django.contrib.auth.decorators import login_required\n\n\nfrom django.contrib.auth.views import PasswordChangeView\nfrom django.contrib.auth.forms import PasswordChangeForm\n\nfrom django.http import HttpResponseRedirect\nfrom django.urls import reverse_lazy, reverse\n\n\nfrom django.views.generic import ListView, DetailView, CreateView, UpdateView, DeleteView\n\nfrom .models import User, Post, Comment, Report\nfrom .forms import CreateUserForm, UserUpdateForm, ProfileUpdateForm, CommentForm, CreatePostForm, ReportForm\n\nfrom .filters import PostFilter\nfrom django.core.paginator import Paginator\n\nfrom django.contrib.auth.mixins import LoginRequiredMixin, UserPassesTestMixin\n\nfrom taggit.models import Tag\n\n\n\n# from django.forms import modelformset_factory\n# from .forms import ImageForm\n# from .models import Images\n\n\n# Create your views here.\n\n\n#landing page\ndef index(request):\n\treturn render(request, 'esko_app/index.html')\n\n#sign-up page\ndef signup(request):\n\n\tif request.method == 'POST':\n\t\tform = CreateUserForm(request.POST)\n\t\tif form.is_valid():\n\t\t\tprint('yes')\n\t\t\tform.save()\n\t\t\tusername = form.cleaned_data.get('username')\n\t\t\tmessages.success(request, f'Account successfully created!')\n\t\t\treturn redirect('/esko_app/login/')\n\t\t\t\n\telse:\n\t\tprint('no')\n\t\tform = CreateUserForm()\t\n\treturn render(request, 'esko_app/signup.html', {'form': form})\n\n\n\ndef loginPage(request):\n\n\tif request.method == 'POST':\n\t\tusername = request.POST.get('username')\n\t\tpassword = request.POST.get('password')\n\t\tuser = authenticate(request, username=username, password=password)\n\n\t\tif user is not None:\n\t\t\t# correct username and password login the user\n\t\t\tlogin(request, user)\n\t\t\treturn 
redirect('/esko_app/home/')\n\t\telse:\n\t\t\tmessages.error(request, 'Error wrong username/password')\n\n\treturn render(request, 'esko_app/login.html')\n\n\ndef logoutUser(request):\n\tlogout(request)\n\treturn redirect('/esko_app/login/')\n\ndef about(request):\n\treturn render(request, 'esko_app/about.html')\n\n\ndef category(request):\n\treturn render(request, 'esko_app/category.html')\n\ndef search_tags(request):\n\tif request.method == \"POST\":\n\t\tsearched = request.POST['searched']\n\t\t# tags = Tag.objects.filter()\n\t\treturn render(request, 'esko_app/search_tags.html', {'searched':searched})\n\telse:\n\t\treturn render(request, 'esko_app/search_tags.html', {})\n\n\n\nclass PostListView(ListView):\n\tmodel = Post\n\ttemplate_name = 'esko_app/profile.html' # <app>/model_viewtype.html\n\tcontext_object_name = 'posts'\n\tordering = ['-date']\n\n\nclass HomeListView(ListView):\n\tmodel = Post\n\ttemplate_name = 'esko_app/home.html' # <app>/model_viewtype.html\n\tcontext_object_name = 'posts'\n\tordering = ['-date']\n\n\n# @login_required(login_url= '/esko_app/login/')\n# def home(request):\n# \tposts = Post.objects.all().order_by('-date')\n# \t# images = Images.objects.all()\n\n# \tfor post in posts:\n\n# \t\ttotal_likes = post.total_likes()\n\n# \t\tliked = False\n# \t\tif post.likes.filter(id=request.user.id).exists():\n# \t\t\tliked = True\n\n# \tfiltered_posts = PostFilter(\n# \t\trequest.GET,\n# \t\tqueryset=posts\n# \t)\n\t\n# \tpost_paginator = Paginator(filtered_posts.qs, 3)\n# \tpage_num = request.GET.get('page')\n# \tpage = post_paginator.get_page(page_num)\n\n# \tcontext = {\n# \t\t'count' : post_paginator.count,\n# \t\t'page' : page,\n# \t\t'total_likes': total_likes,\n# \t\t'liked': liked,\n# \t\t# 'images': images,\n# \t}\t\n# \treturn render(request, 'esko_app/home.html', context)\n\n\n\n@login_required(login_url= '/esko_app/login/')\ndef home(request):\n\tposts = Post.objects.all().order_by('-date')\n\t\n\n\n\tfor post in posts:\n\n\t\ttotal_likes = post.total_likes()\n\t\ttag_list = post.tag_list()\n\t\t# print(tag_list)\n\n\t\tliked = False\n\t\tif post.likes.filter(id=request.user.id).exists():\n\t\t\tliked = True\n\n\tfiltered_posts = PostFilter(\n\t\trequest.GET,\n\t\tqueryset=posts\n\t)\n\t\n\tpost_paginator = Paginator(filtered_posts.qs, 3)\n\tpage_num = request.GET.get('page')\n\tpage = post_paginator.get_page(page_num)\n\n\tcontext = {\n\t\t'count' : post_paginator.count,\n\t\t'page' : page,\n\t\t'total_likes': total_likes,\n\t\t'liked': liked,\n\t\t'tag_list': tag_list,\n\t}\t\n\treturn render(request, 'esko_app/home.html', context)\n\n@login_required(login_url= '/esko_app/login/')\ndef PostByCategory(request):\n\tposts = Post.objects.all().order_by('-date')\n\n\n\tfor post in posts:\n\n\t\ttotal_likes = post.total_likes()\n\n\t\tliked = False\n\t\tif post.likes.filter(id=request.user.id).exists():\n\t\t\tliked = True\n\n\tfiltered_posts = PostFilter(\n\t\trequest.GET,\n\t\tqueryset=posts\n\t)\n\t\n\tpost_paginator = Paginator(filtered_posts.qs, 3)\n\tpage_num = request.GET.get('page')\n\tpage = post_paginator.get_page(page_num)\n\n\tcontext = {\n\t\t'count' : post_paginator.count,\n\t\t'page' : page,\n\t\t'total_likes': total_likes,\n\t\t'liked': liked,\n\t}\t\n\t# return render(request, 'esko_app/home.html', context)\n\treturn render(request, 'esko_app/sell.html', context)\n\n\nclass TagIndexView(ListView):\n\tmodel = Post\n\ttemplate_name = 'esko_app/tagView.html' # <app>/model_viewtype.html\n\tcontext_object_name = 'posts'\n\tordering = 
['-date']\n\n\tdef get_queryset(self):\n\t\treturn Post.objects.filter(tags__slug=self.kwargs.get('tag_slug'))\n\n\n\ndef SearchByTag(request):\n \n\tif request.method == 'GET': # If the form is submitted\n\t\tsearch_query = request.GET.get('search_box', None)\n\t\tposts = Post.objects.filter(tags__contains=str(search_query))\n\n\t\ttotal_likes = 0\n\t\tliked = False\n\t\t# tag_list = ['tags']\n\t\tfor post in posts:\n\n\t\t\ttotal_likes = post.total_likes()\n\t\t\t# tag_list = post.tag_list()\n\n\t\t\tliked = False\n\t\t\tif post.likes.filter(id=request.user.id).exists():\n\t\t\t\tliked = True\n\n\t\tfiltered_posts = PostFilter(\n\t\t\trequest.GET,\n\t\t\tqueryset=posts\n\t\t)\n\t\t\n\t\tpost_paginator = Paginator(filtered_posts.qs, 3)\n\t\tpage_num = request.GET.get('page')\n\t\tpage = post_paginator.get_page(page_num)\n\n\t\tcontext = {\n\t\t\t'count' : post_paginator.count,\n\t\t\t'page' : page,\n\t\t\t'total_likes': total_likes,\n\t\t\t'liked': liked,\n\t\t\t'search_query': search_query,\n\t\t\t'tag_list' : post.tag_list(),\n\n\t\t}\t\n\t\treturn render(request, 'esko_app/search_tags.html', context)\n\n\n@login_required(login_url= '/esko_app/login/')\ndef profile(request):\n\tposts = Post.objects.filter(author=request.user).order_by('-date')\n\n\ttotal_likes = 0\n\tliked = False\n\tfor post in posts:\n\t\ttotal_likes = post.total_likes()\n\t\ttag_list = post.tag_list()\n\n\t\tliked = False\n\t\tif post.likes.filter(id=request.user.id).exists():\n\t\t\tliked = True\n\n\t\n\tfiltered_posts = PostFilter(\n\t\trequest.GET,\n\t\tqueryset=posts\n\t)\n\t\n\tpost_paginator = Paginator(filtered_posts.qs, 3)\n\tpage_num = request.GET.get('page')\n\tpage = post_paginator.get_page(page_num)\n\n\t\n\tcontext = {\n\t\t'count' : post_paginator.count,\n\t\t'page' : page,\n\t\t'total_likes': total_likes,\n\t\t'liked': liked,\n\t\t'tag_list': tag_list\n\t}\n\t\n\treturn render(request,'esko_app/profile.html', context)\n\n\ndef profileOther(request,username):\n\t\t\n\tpost = get_object_or_404(Post,id=request.POST.get('post_id'))\n\n\ttotal_likes = post.total_likes()\n\tliked = False\n\tif post.likes.filter(id=request.user.id).exists():\n\t\tliked = True\n\n\n\tposts = Post.objects.filter(author=post.author).order_by('-date')\n\tprint(posts)\n\n\tcontext = {\n\t\t'user' : post.author,\n\t\t'posts' :posts,\n\t\t'total_likes': total_likes,\n\t\t'liked': liked,\n\t\t'tag_list': post.tag_list(),\n\t}\n\treturn render(request,'esko_app/other_profile.html', context)\n\n\ndef profileOtherComment(request,username):\n\tcomment = get_object_or_404(Comment,id=request.POST.get('comment_id'))\n\tcomments = Comment.objects.filter(commenter=comment.commenter)\n\n\tposts = Post.objects.filter(author=comment.commenter).order_by('-date')\n\t\n\tcontext = {\n\t\t'user' : comment.commenter,\n\t\t'posts' :posts,\n\t\t\n\t}\t\n\treturn render(request,'esko_app/other_profile_comment.html', context)\n\n\n\n\nclass PostDetailView(LoginRequiredMixin,DetailView):\n\tmodel = Post\n\n\tdef get_context_data(self,*args, **kwargs):\n\t\tcontext = super(PostDetailView,self).get_context_data()\n\n\t\t# getting number of likes \n\t\tget_post = get_object_or_404(Post, id=self.kwargs['pk'])\n\t\ttotal_likes = get_post.total_likes()\n\t\ttag_list = get_post.tag_list()\n\n\t\tliked = False\n\t\tif get_post.likes.filter(id=self.request.user.id).exists():\n\t\t\tliked = True\n\n\t\tcontext[\"total_likes\"] = total_likes\n\t\tcontext[\"liked\"] = liked\n\t\tcontext[\"tag_list\"] = 
tag_list\n\t\tprint(tag_list)\n\t\tprint(get_post.tags)\n\t\treturn context\n\n\n\nclass PostCreateView(LoginRequiredMixin,CreateView):\n\tmodel = Post\n\tform_class = CreatePostForm\n\t# fields = ['category','description','tags','post_image']\n\n\tdef form_valid(self, form):\n\t\tform.instance.author = self.request.user\n\t\treturn super().form_valid(form)\n\n\nclass PostUpdateView(LoginRequiredMixin,UserPassesTestMixin,UpdateView):\n\tmodel = Post\n\tform_class = CreatePostForm\n\t# fields = ['category','description','tags','post_image']\n\n\tdef form_valid(self, form):\n\t\tform.instance.author = self.request.user\n\t\treturn super(PostUpdateView,self).form_valid(form)\n\n\tdef test_func(self):\n\t\tpost = self.get_object()\n\t\tif self.request.user == post.author:\n\t\t\treturn True\n\t\treturn False\n\nclass PostDeleteView(LoginRequiredMixin,UserPassesTestMixin,DeleteView):\n\tmodel = Post\n\tsuccess_url = '/esko_app/profile'\n\n\tdef test_func(self):\n\t\tpost = self.get_object()\n\t\tif self.request.user == post.author:\n\t\t\treturn True\n\t\treturn False\n\n\nclass ReportPostDeleteView(LoginRequiredMixin,UserPassesTestMixin,DeleteView):\n\tmodel = Post\n\tsuccess_url = '/esko_app/reported-posts'\n\n\tdef test_func(self):\n\t\tpost = self.get_object()\n\t\tif self.request.user == post.author:\n\t\t\treturn True\n\t\treturn False\n\n\n@login_required(login_url= '/esko_app/login/')\ndef LikeView(request,pk):\n\tpost = get_object_or_404(Post,id=request.POST.get('post_id'))\n\tliked = False\n\tif post.likes.filter(id=request.user.id).exists():\n\t\tpost.likes.remove(request.user)\n\t\tliked = False\n\telse:\n\t\tpost.likes.add(request.user)\n\t\tliked = True\n\n\n\treturn HttpResponseRedirect(reverse('esko_app:post-detail',args=[str(pk)]))\n\n\n\nclass AddCommentView(LoginRequiredMixin,CreateView):\n\tmodel = Comment\n\tform_class= CommentForm\n\ttemplate_name = 'esko_app/add_comment.html'\n\n\tdef form_valid(self, form):\n\t\tform.instance.commenter = self.request.user\n\t\tform.instance.post_id = self.kwargs['pk']\n\t\treturn super().form_valid(form)\n\t\n\tdef get_success_url(self, **kwargs):\n\t\treturn reverse('esko_app:post-detail', kwargs={'pk':self.kwargs['pk']})\n\n\n\n\n@login_required(login_url= '/esko_app/login/')\ndef reportedPosts(request):\n\treports = Report.objects.all().order_by('-date_reported')\n\t#posts = Post.objects.all(repo_id__pk=rpost.post)\n\n\tcontext = {\n\t\t'reports' : reports\n\t}\n\treturn render(request, 'esko_app/reported-post.html', context)\n\n\nclass ReportView(LoginRequiredMixin,CreateView):\n\tmodel = Report\n\tform_class= ReportForm\n\ttemplate_name = 'esko_app/add_report.html'\n\n\tdef form_valid(self, form):\n\t\tform.instance.reporter = self.request.user\n\t\tform.instance.post_id = self.kwargs['pk']\n\t\treturn super().form_valid(form)\n\t\n\tdef get_success_url(self, **kwargs):\n\t\treturn reverse('esko_app:post-detail', kwargs={'pk':self.kwargs['pk']})\n\n\nclass ReportDeleteView(LoginRequiredMixin,UserPassesTestMixin,DeleteView):\n\tmodel = Report\n\tsuccess_url = '/esko_app/reported-posts'\n\n\tdef test_func(self):\n\t\treport = self.get_object()\n\t\tif self.request.user.is_superuser:\n\t\t\treturn True\n\t\treturn False\n\n\n\n@login_required(login_url= '/esko_app/login/')\ndef profileSettings(request):\n\tif request.method == 'POST':\n\t\tu_form = UserUpdateForm(request.POST,instance=request.user)\n\t\tp_form = ProfileUpdateForm(request.POST,request.FILES, instance=request.user.profile)\n\n\t\tif u_form.is_valid() and 
p_form.is_valid():\n\t\t\tu_form.save()\n\t\t\tp_form.save()\n\t\t\tmessages.success(request, f'Your profile has been updated!')\n\t\t\treturn redirect('/esko_app/profileSettings/')\n\telse:\n\t\tu_form = UserUpdateForm(instance=request.user)\n\t\tp_form = ProfileUpdateForm(instance=request.user.profile)\n\n\tcontext = {\n\t\t'u_form': u_form,\n\t\t'p_form': p_form\n\t}\n\n\treturn render(request, 'esko_app/profilesettings.html',context)\n\nclass PasswordsChangeView(PasswordChangeView):\n\tform_class = PasswordChangeForm\n\tsuccess_url = reverse_lazy('esko_app:password_success')\n\n\n\ndef password_success(request):\n\treturn render(request, 'esko_app/password_reset_done.html')" }, { "alpha_fraction": 0.5491606593132019, "alphanum_fraction": 0.5863309502601624, "avg_line_length": 31.076923370361328, "blob_id": "a289c044df8453ad342173f370ae5d83e16681b9", "content_id": "035dbb66bf72cee58c23742962d8f44f5bffe590", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 834, "license_type": "no_license", "max_line_length": 123, "num_lines": 26, "path": "/esko_app/migrations/0028_auto_20210530_2331.py", "repo_name": "reg-11/122-Project", "src_encoding": "UTF-8", "text": "# Generated by Django 3.2.3 on 2021-05-30 15:31\n\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('esko_app', '0027_auto_20210530_1506'),\n ]\n\n operations = [\n migrations.RemoveField(\n model_name='post',\n name='post_image',\n ),\n migrations.CreateModel(\n name='Images',\n fields=[\n ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('image', models.ImageField(blank=True, null=True, upload_to='post_pics', verbose_name='Image')),\n ('post', models.ForeignKey(default=None, on_delete=django.db.models.deletion.CASCADE, to='esko_app.post')),\n ],\n ),\n ]\n" }, { "alpha_fraction": 0.6897004842758179, "alphanum_fraction": 0.7002102136611938, "avg_line_length": 27.810606002807617, "blob_id": "d66a9c261e99d76240ccac7b846be4b72a5150cf", "content_id": "f7c441006fd6e63b2df808bcd089d2ef29ecd829", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3806, "license_type": "no_license", "max_line_length": 93, "num_lines": 132, "path": "/esko_app/models.py", "repo_name": "reg-11/122-Project", "src_encoding": "UTF-8", "text": "from django.db import models\nfrom django.utils import timezone\nfrom django.contrib.auth.models import User\nfrom PIL import Image\n\nfrom django.urls import reverse\n\n# from taggit.managers import TaggableManager\n# Create your models here.\n\n\n\nclass Category(models.Model):\n\tname = models.CharField(max_length=30)\n\n\tdef __str__(self):\n\t\treturn self.name\n\n\tdef get_absolute_url(self):\n\t\treturn reverse('/esko_app/home/')\n\n\n# class Post(models.Model):\n\n# \tCATEGORIES = [\n# \t\t('sell', 'sell'),\n# \t\t('find', 'find'),\n# \t\t('services/rent', 'services/rent'),\n# \t\t('swap', 'swap'),\n# \t]\n\t\n# \tauthor = models.ForeignKey(User, on_delete=models.CASCADE, null=True)\n# \tcategory = models.CharField(max_length=13,choices=CATEGORIES)\n# \tdescription = models.TextField(max_length=250)\n# \t# tags = models.CharField(max_length=100)\n# \tpost_image = models.ImageField(upload_to='post_pics',null=True,blank=True)\n# \tdate = models.DateTimeField(auto_now_add=True)\n# \tlikes = models.ManyToManyField(User, related_name='user_posts')\n# \ttags = 
TaggableManager()\n\n\n# \tdef total_likes(self):\n# \t\treturn self.likes.count()\n\n# \tdef __str__(self):\n# \t\treturn self.category\n\n# \tdef get_absolute_url(self):\n# \t\treturn reverse('esko_app:post-detail', kwargs={'pk':self.pk})\n\t\t\nclass Post(models.Model):\n\n\tCATEGORIES = [\n\t\t('sell', 'sell'),\n\t\t('find', 'find'),\n\t\t('services/rent', 'services/rent'),\n\t\t('swap', 'swap'),\n\t]\n\t\n\tauthor = models.ForeignKey(User, on_delete=models.CASCADE, null=True)\n\tcategory = models.CharField(max_length=13,choices=CATEGORIES)\n\tdescription = models.TextField(max_length=250)\n\ttags = models.CharField(max_length=100)\n\tpost_image = models.ImageField(upload_to='post_pics',null=True,blank=True)\n\tdate = models.DateTimeField(auto_now_add=True)\n\tlikes = models.ManyToManyField(User, related_name='user_posts')\n\t# tags = TaggableManager()\n\n\tdef tag_list(self):\n\t\t\n\t\treturn self.tags.split(',')\n\n\n\tdef total_likes(self):\n\t\treturn self.likes.count()\n\n\tdef __str__(self):\n\t\treturn self.category\n\n\tdef get_absolute_url(self):\n\t\treturn reverse('esko_app:post-detail', kwargs={'pk':self.pk})\n\nclass Profile(models.Model):\n\tuser = models.OneToOneField(User, on_delete=models.CASCADE)\n\tprofile_pic = models.ImageField(default='default.jpg', upload_to='profile_pics', blank=True)\n\tyear_level = models.CharField(max_length=30, blank=True)\n\tphone_number = models.IntegerField(null=True,blank=True)\n\tsns_account = models.URLField(max_length = 200,blank=True)\n\tbio = models.CharField(max_length=200, blank=True)\n\n\tdef __str__(self):\n\t\treturn f'{self.user.username} Profile'\n\n\tdef save(self, *args, **kwargs):\n\t\tsuper(Profile,self).save(*args, **kwargs)\n\n\t\timg = Image.open(self.profile_pic.path)\n\n\t\tif img.height > 400 or img.width > 400:\n\t\t\toutput_size = (400,400)\n\t\t\timg.thumbnail(output_size)\n\t\t\timg.save(self.profile_pic.path)\n\n\nclass Comment(models.Model):\n\tpost = models.ForeignKey(Post,related_name=\"comments\",on_delete=models.CASCADE)\n\tcommenter = models.ForeignKey(User, on_delete=models.CASCADE, null=True)\n\tbody = models.TextField()\n\tdate_added = models.DateTimeField(auto_now_add=True)\n\n\tdef __str__(self):\n\t\treturn '%s - %s' % (self.post.category, self.commenter)\n\nclass Report(models.Model):\n\tPROBLEMS = [\n \t\t('hate speech', 'hate speech'),\n \t\t('violence', 'violence'),\n \t\t('harassment', 'harassment'),\n \t\t('nudity', 'nudity'),\n \t\t('false information', 'false information'),\n \t\t('spam', 'spam'),\n \t\t('others', 'others')\n \t]\n\n\treporter = models.ForeignKey(User, on_delete=models.CASCADE, null=True)\n\tpost = models.ForeignKey(Post,related_name=\"reports\",on_delete=models.CASCADE)\n\tdate_reported = models.DateTimeField(auto_now_add=True)\n\tproblem = models.CharField(max_length=50,choices=PROBLEMS)\n\tnotes = models.TextField(null=True,blank=True)\n\n\tdef __str__(self):\n\t\treturn '%s - %s' % (self.problem, self.reporter)\n\n\n\n" }, { "alpha_fraction": 0.5331950187683105, "alphanum_fraction": 0.5746887922286987, "avg_line_length": 25.77777862548828, "blob_id": "3424cbfcc69efc6c1c720bf609fd43cf84d0fccf", "content_id": "bdb4736a69a761c3244eb3987a8492e9b06971ef", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 482, "license_type": "no_license", "max_line_length": 144, "num_lines": 18, "path": "/esko_app/migrations/0021_alter_post_category.py", "repo_name": "reg-11/122-Project", "src_encoding": "UTF-8", "text": "# Generated by 
Django 3.2.3 on 2021-05-29 04:24\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('esko_app', '0020_alter_post_category'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='post',\n name='category',\n field=models.CharField(choices=[('sell', 'sell'), ('find', 'find'), ('services', 'services/rent'), ('swap', 'swap')], max_length=8),\n ),\n ]\n" }, { "alpha_fraction": 0.5292153358459473, "alphanum_fraction": 0.5809682607650757, "avg_line_length": 25.04347801208496, "blob_id": "07792fe180e4fba4b41f410cb50bfa168e25efef", "content_id": "871e6d86d0185770a57ee5f5ed8dcbaf08c30902", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 599, "license_type": "no_license", "max_line_length": 97, "num_lines": 23, "path": "/esko_app/migrations/0004_auto_20210525_0004.py", "repo_name": "reg-11/122-Project", "src_encoding": "UTF-8", "text": "# Generated by Django 3.2.3 on 2021-05-24 16:04\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('esko_app', '0003_auto_20210524_2313'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='profile',\n name='profile_picture',\n field=models.ImageField(blank=True, default='default.jpg', upload_to='profile_pics'),\n ),\n migrations.AlterField(\n model_name='profile',\n name='sns_account',\n field=models.URLField(blank=True),\n ),\n ]\n" }, { "alpha_fraction": 0.5168269276618958, "alphanum_fraction": 0.5913461446762085, "avg_line_length": 22.11111068725586, "blob_id": "2984df0a49e3e77d89249570bc78846205b5c055", "content_id": "50f1260467fe3f286b91c31a4e3e1bf7104ed9d3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 416, "license_type": "no_license", "max_line_length": 82, "num_lines": 18, "path": "/esko_app/migrations/0011_alter_post_image.py", "repo_name": "reg-11/122-Project", "src_encoding": "UTF-8", "text": "# Generated by Django 3.2.3 on 2021-05-26 10:02\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('esko_app', '0010_auto_20210526_1631'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='post',\n name='image',\n field=models.ImageField(blank=True, null=True, upload_to='post_pics'),\n ),\n ]\n" }, { "alpha_fraction": 0.5973207354545593, "alphanum_fraction": 0.6233254671096802, "avg_line_length": 46, "blob_id": "0719ed3608c5a826201f5aa8f201e3f6da2b7cfb", "content_id": "389a3154b2b97ba6b15b9f9239009b5a0c2dd711", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1269, "license_type": "no_license", "max_line_length": 265, "num_lines": 27, "path": "/esko_app/migrations/0030_report.py", "repo_name": "reg-11/122-Project", "src_encoding": "UTF-8", "text": "# Generated by Django 3.2.3 on 2021-05-30 22:28\n\nfrom django.conf import settings\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ('esko_app', '0029_auto_20210531_0114'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Report',\n fields=[\n ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('date_reported', models.DateTimeField(auto_now_add=True)),\n ('problem', models.CharField(choices=[('hate speech', 'hate speech'), 
('violence', 'violence'), ('harassment', 'harassment'), ('nudity', 'nudity'), ('false information', 'false information'), ('spam', 'spam'), ('others', 'others')], max_length=50)),\n ('notes', models.TextField(blank=True, null=True)),\n ('post', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='reports', to='esko_app.post')),\n ('reporter', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),\n ],\n ),\n ]\n" }, { "alpha_fraction": 0.4985465109348297, "alphanum_fraction": 0.7020348906517029, "avg_line_length": 16.225000381469727, "blob_id": "d5ae8c6386630d552b174623df2833e18cda3f02", "content_id": "cac8e5982b10b6a6860ab4b1044bc12a855ff5ae", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 688, "license_type": "no_license", "max_line_length": 27, "num_lines": 40, "path": "/requirements.txt", "repo_name": "reg-11/122-Project", "src_encoding": "UTF-8", "text": "asgiref==3.3.4\ncertifi==2020.12.5\ncffi==1.14.5\nchardet==4.0.0\nclick==7.1.2\ncryptography==3.4.7\ndefusedxml==0.7.1\ndj-database-url==0.5.0\nDjango==3.2.3\ndjango-allauth==0.44.0\ndjango-filter==2.4.0\ndjango-heroku==0.3.1\ndjango-taggit==1.4.0\nfastapi==0.61.1\nFlask==2.0.0\ngunicorn==20.1.0\nh11==0.9.0\nidna==2.10\nitsdangerous==2.0.1\nJinja2==3.0.1\nMarkupSafe==2.0.1\noauthlib==3.1.0\nPillow==8.2.0\npsycopg2==2.8.6\npycparser==2.20\npydantic==1.6.1\nPyJWT==2.1.0\npython-decouple==3.4\npython3-openid==3.2.0\npytz==2021.1\nrequests==2.25.1\nrequests-oauthlib==1.3.0\nsqlparse==0.4.1\nstarlette==0.13.6\ntyping-extensions==3.10.0.0\nurllib3==1.26.4\nuvicorn==0.11.8\nwebsockets==8.1\nWerkzeug==2.0.1\nwhitenoise==5.2.0" } ]
19
houluy/file-transfer
https://github.com/houluy/file-transfer
3214bf82083dc71b27c856322b4f54167e5a30d7
357b22042798d4ff5e1dea032c8f583f19e53c70
e5febc2e0df1de7e3d032bba0f2dfec7222fbd6a
refs/heads/master
2022-09-08T03:24:39.872925
2020-05-28T16:28:59
2020-05-28T16:28:59
267,640,372
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7102272510528564, "alphanum_fraction": 0.7329545617103577, "avg_line_length": 21, "blob_id": "ccb1e57c9b7602013c33234dd0d09395e12ef73e", "content_id": "7884c9c3f310f32e2e8675dc6dc0af7441afa673", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 176, "license_type": "no_license", "max_line_length": 52, "num_lines": 8, "path": "/sender.py", "repo_name": "houluy/file-transfer", "src_encoding": "UTF-8", "text": "import client\nimport socket\n\ns = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\nserver_addr = (\"houlu.me\", 8674)\nfilename = \"test.txt\"\n\nclient.send(s, server_addr, filename)\n" }, { "alpha_fraction": 0.5091299414634705, "alphanum_fraction": 0.5166487693786621, "avg_line_length": 24.108108520507812, "blob_id": "3611af76f0b1b5f439545e3e481bb4ca0e7418f6", "content_id": "3b044dea9fa76e6c154db67d2af3664a1e3a18a4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 931, "license_type": "no_license", "max_line_length": 48, "num_lines": 37, "path": "/client.py", "repo_name": "houluy/file-transfer", "src_encoding": "UTF-8", "text": "import socket\n\nSYNC_MSG = \"SEND\"\nRECV_MSG = \"RECV\"\nBUFFER_SIZE = 1024\nEND_SIGN = \"\\u169D\"\n\ndef recv(s, server_addr):\n s.sendto(RECV_MSG.encode(), server_addr)\n file_name, _ = s.recvfrom(BUFFER_SIZE)\n try:\n f = open(file_name.decode(), 'wb')\n except:\n pass\n else:\n while True:\n data, _ = s.recvfrom(BUFFER_SIZE)\n if data == END_SIGN.encode():\n break\n else:\n f.write(data)\n f.write(b'hello')\n finally:\n f.close()\n s.close()\n\ndef send(s, server_addr, filename):\n s.sendto(SYNC_MSG.encode(), server_addr)\n with open(filename, 'rb') as f:\n s.sendto(filename.encode(), server_addr)\n while True:\n data = f.read(BUFFER_SIZE)\n if not data:\n break\n s.sendto(data, server_addr)\n s.sendto(END_SIGN.encode(), server_addr)\n s.close()\n\n\n" }, { "alpha_fraction": 0.6028202176094055, "alphanum_fraction": 0.6274970769882202, "avg_line_length": 21.972972869873047, "blob_id": "cc086922ceeea91948cea636159a317e85e0318e", "content_id": "01374099f4f77ccf304ea0c519a3e316c4d76dd5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 851, "license_type": "no_license", "max_line_length": 52, "num_lines": 37, "path": "/main.py", "repo_name": "houluy/file-transfer", "src_encoding": "UTF-8", "text": "import socket\nimport logging\nimport logging.config as lc\nimport config\n\nlogger = logging.getLogger(\"main\")\nlc.dictConfig(config.log_config)\n\nSYNC_LEN = 4\nSYNC_MSG = \"SEND\"\nRECV_MSG = \"RECV\"\nEND_SIGN = \"\\n\"\nBUFFER_SIZE = 2048\n\ns = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\naddr = (\"\", 8674)\ns.bind(addr)\nnum = 2\n\nwhile True:\n sync_data0, addr0 = s.recvfrom(SYNC_LEN)\n logger.info(f\"Get a connection with {addr0}\")\n sync_data1, addr1 = s.recvfrom(SYNC_LEN)\n logger.info(f\"Get a connection with {addr0}\")\n # Start a new thread to handle\n if sync_data0.decode() == SYNC_MSG:\n src = addr0\n tgt = addr1\n else:\n src = addr1\n tgt = addr0\n # Source and target are determined\n while True:\n data, _ = s.recvfrom(BUFFER_SIZE)\n if not data:\n break\n s.sendto(data, tgt)\n\n" }, { "alpha_fraction": 0.7083333134651184, "alphanum_fraction": 0.7361111044883728, "avg_line_length": 19.571428298950195, "blob_id": "36de5e07785036498122dea663268222f1802691", "content_id": "831f7b7304686522e0db7cc3358ed226db2f8fac", "detected_licenses": [], 
"is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 144, "license_type": "no_license", "max_line_length": 52, "num_lines": 7, "path": "/receiver.py", "repo_name": "houluy/file-transfer", "src_encoding": "UTF-8", "text": "import client\nimport socket\n\ns = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\nserver_addr = (\"houlu.me\", 8674)\n\nclient.recv(s, server_addr)\n" } ]
4
xujinchang/HAR-stacked-residual-bidir-LSTMs
https://github.com/xujinchang/HAR-stacked-residual-bidir-LSTMs
e58c393c72b8a10f4b5bb74ffe702b3b6b47b034
ac304130245683cdacfe40ce938af658023e8711
c56490863e867739f6df80cf6752aa91c1c50911
refs/heads/master
2021-01-25T06:17:09.315187
2017-07-15T13:57:51
2017-07-15T13:57:51
93,539,946
0
1
null
2017-06-06T16:29:13
2017-05-31T08:34:30
2017-06-04T19:24:01
null
[ { "alpha_fraction": 0.4732891023159027, "alphanum_fraction": 0.4902234673500061, "avg_line_length": 32.30232620239258, "blob_id": "48bc673f4170444cf6290388ed7a7b9f9c2afe9d", "content_id": "30a472933d7365aefca8b9dd75123f3c946f4be3", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5954, "license_type": "permissive", "max_line_length": 125, "num_lines": 172, "path": "/process_video/extract_video.py", "repo_name": "xujinchang/HAR-stacked-residual-bidir-LSTMs", "src_encoding": "UTF-8", "text": "# coding: utf8\n\nimport subprocess as sp\nimport logging\nimport os\n\nfrom Queue import Queue\nimport threading\n\n__author__ = 'xujinchang'\n\nlogger = logging.getLogger(__name__)\n\nclass video_test:\n command0 = 'ffprobe -i {} -show_format | grep duration'\n command1 = 'ffmpeg -ss {time_off_h}:{time_off_m}:{time_off_s} -i {input_file} -frames:v 1 {output_file}'\n command2 = 'ffmpeg -y -i {input_file} -ss {start_time} -t {duration} -acodec aac -strict experimental {output_file}'\n stopProcess = False\n _do_extract_finished = False #_do_extract是否执行完的标志\n detect_finished = False\n dur = 0\n\n ############################提取图片################################\n '''\n 功能:初始化函数\n 参数:\n video_name: 视频名\n gframe_dir: 截取的视频的图像的保存位置\n '''\n def __init__(self,video_name,frame_dir,cut_dir):\n self.video_name = video_name\n self.frame_dir = frame_dir\n self.cut_dir = cut_dir\n '''\n 功能:\n 通过读取视频文件名返回视频的总时间\n '''\n def read_video_duration(self):\n the_command0 = self.command0\n duration = sp.check_output(the_command0.format(self.video_name), shell=True)\n try:\n duration = float(duration.split('=')[1])\n return duration\n except Exception as e:\n logger.error('get video duration exception', e)\n\n '''\n 功能:\n 从视频中每隔一定时间提取一帧并且保存成图片\n '''\n def _do_extract(self,offset):\n the_command1 = self.command1\n the_current_frame = 0\n dur = 0\n\n dur = self.read_video_duration()\n logger.debug('******************duration: {}'.format(dur))\n\n the_current_frame = offset\n\n while the_current_frame <= dur:\n logger.debug('******************current time {}'.format(the_current_frame))\n cmd = the_command1.format(\n time_off_h=int(the_current_frame / 3600),\n time_off_m=int((the_current_frame / 60) % 60),\n time_off_s=the_current_frame % 60,\n input_file=self.video_name,\n output_file=(self.frame_dir+'/{}.jpg').format(the_current_frame)\n )\n sp.call(cmd, shell=True)\n the_current_frame += 0.02\n self._do_extract_finished = True\n\n # #########################发送detect请求##########################\n\n '''\n 功能:\n\n '''\n def video(self,faces):\n the_command2 = self.command2\n logging.debug(\"****************video*********************\")\n print \"****************video*********************\"\n q = Queue(maxsize = 7)\n for face in faces:\n q.put(face)\n if(q.qsize() == 7):\n start = q.get()\n print float(face) - float(start)\n if( (float(face) - float(start) ) == 3.0):\n logging.debug(\"******************* put video:\"+(self.cut_dir+'/{}.mp4').format(start))\n cmd = the_command2.format(\n input_file= self.video_name,\n start_time= get_hhmmss(start),\n duration=\"00:00:03\",\n output_file=(self.cut_dir+'/{}.mp4').format(start)\n )\n sp.call(cmd, shell=True)\n q.queue.clear()\n else:\n q.queue.clear()\n\n '''\n 功能:\n 执行所有功能模块\n '''\n def execute(self,offset):\n logging.debug(\"****************extract start********************\")\n print \"****************extract start********************\"\n self._do_extract(offset)\n logging.debug(\"****************extract 
end********************\")\n # self.video(faces)\n'''\n功能:\n返回hh:mm:ss的字符串\n'''\ndef get_hhmmss(seconds):\n seconds = float(seconds)\n if seconds%1 != 0:\n seconds = seconds+0.5\n hh=int(seconds / 3600)\n if(hh<10):\n hh = \"0\"+str(hh)\n mm=int((seconds / 60) % 60)\n if(mm<10):\n mm = \"0\"+str(mm)\n ss=int(seconds % 60)\n if (ss<10):\n ss = \"0\"+str(ss)\n return str(hh)+\":\"+str(mm)+\":\"+str(ss)\n\n'''\n功能:\n定义线程\n'''\nclass myThread (threading.Thread): #继承父类threading.Thread\n def __init__(self,i):\n threading.Thread.__init__(self)\n self.i = i\n def run(self):\n file_name = \"/home/tmp_data_dir/zhuzezhou/codalab/CTest\"\n save_name = \"/localSSD/xjc/codalab_test\"\n mp4_list = os.listdir(file_name)\n #mp4_list = ['2481608439_HAPPINESS.mp4']\n for x in mp4_list:\n # if not os.path.exists(\"{name}/{name1}\".format(name=save_name,name1=str(self.i))):\n # os.makedirs(\"{name}/{name1}\".format(name=save_name,name1=str(self.i)))\n if not os.path.exists(\"{name}/{name2}\".format(name=save_name,name2=x[:-4])):\n os.makedirs(\"{name}/{name2}\".format(name=save_name,name2=x[:-4]))\n gvideo_name = \"{name}/{mp4_file}\".format(name=file_name,mp4_file=x)\n print(gvideo_name)\n log_path = '.{name}.log'.format(name=str(self.i)+\"_\"+x[:-4])\n gframe_dir = \"{name}/{frames}\".format(name=save_name,frames=x[:-4])\n print(gframe_dir)\n gcut_dir = None\n logging.basicConfig(level=logging.DEBUG,\n format='%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s',\n datefmt='%a, %d %b %Y %H:%M:%S',\n filename=log_path,\n filemode='w')\n\n test = video_test(gvideo_name,gframe_dir,gcut_dir)\n test.execute(0)\n\nthreads = []\nfor i in range(1,2):\n thread = myThread(i)\n threads.append(thread)\n thread.start()\n\nfor thread in threads:\n thread.join()\n" }, { "alpha_fraction": 0.5378031134605408, "alphanum_fraction": 0.6148359775543213, "avg_line_length": 34, "blob_id": "76b2a47cdadc584493a1681bcba4d5598980c853", "content_id": "3d99c8a1e17842aeee6b68f46595390560fa9ac8", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3505, "license_type": "permissive", "max_line_length": 107, "num_lines": 100, "path": "/process_video/face_warp.py", "repo_name": "xujinchang/HAR-stacked-residual-bidir-LSTMs", "src_encoding": "UTF-8", "text": "import sys\nimport os\n#import Image\nimport numpy as np\nimport cPickle\nimport cv2\nimport numpy.linalg as linalg\n\n#################### functions begin ##########################\n\ndef crop_image(img,landmarks, scale_id,region_id):\n scale_list = [1.0, 1.3, 1.5] #1.5for coda ,1.7for 112\n scale = scale_list[scale_id]\n #print img.shape\n width,height,channel = img.shape\n scale_w,scale_h = width/scale, height/scale\n h_off,w_off = (height-scale_h)/2, (width-scale_w)/2\n window_size = 120 / scale\n image_scale = img[int(w_off):width-int(w_off),int(h_off):height-int(h_off)]\n if region_id == 0:\n return image_scale\n if region_id in [1,2,3,4]:\n start_pos = int(scale_h*(2*region_id-2)/15)\n end_pos = int(scale_h*(2*region_id + 7)/15)\n return image_scale[start_pos:end_pos,0:int(scale_w)]\n if region_id in [5,6,7,8,9]:\n center_pos = [int(landmarks[(region_id-5)*2]) - w_off, int(landmarks[(region_id-5)*2 + 1]) - h_off]\n x = max(int(center_pos[1]-window_size/2),0)\n y = max(int(center_pos[0]-window_size/2),0)\n x_width = min(scale_w-x,window_size)\n y_height = min(scale_h-y,window_size)\n return image_scale[x:x+int(x_width),y:y+int(y_height)]\n return None\n\ndef resize_image(img, dsize):\n return 
cv2.resize(img,dsize)\n\ndef alignTransformation(src_pos,dst_pos):\n\t\ta=0.0;b=0.0;tx=0.0;ty=0.0;X1=0.0;Y1=0.0;X2=0.0;Y2=0.0;Z=0.0;C1=0.0;C2=0.0;W=2.0\n\t\tfor i in range(0,2):\n\t\t\t\tx1=src_pos[i*2]\n\t\t\t\ty1=src_pos[i*2+1]\n\t\t\t\tx2=dst_pos[i*2]\n\t\t\t\ty2=dst_pos[i*2+1]\n\t\t\t\tZ=Z+x2*x2+y2*y2\n\t\t\t\tX1=X1+x1\n\t\t\t\tY1=Y1+y1\n\t\t\t\tX2=X2+x2\n\t\t\t\tY2=Y2+y2\n\t\t\t\tC1=C1+x1*x2+y1*y2\n\t\t\t\tC2=C2+y1*x2-x1*y2\n\t\tSolnA=[X2, -Y2, W, 0, Y2, X2, 0, W, Z, 0, X2, Y2, 0, Z, -Y2, X2]\n\t\tA=np.array(SolnA,dtype=np.float64).reshape(4,4)\n\t\tSolnB=[X1, Y1, C1, C2]\n\t\tB=np.array(SolnB,dtype=np.float64).reshape(4,1)\n\t\tSoln=np.zeros((4,1),dtype=np.float64)\n\t\tcv2.solve(A,B,Soln,cv2.DECOMP_SVD)\n\t\ta=Soln[0,0];b=Soln[1,0];tx=Soln[2,0];ty=Soln[3,0]\n\t\tnorm=a*a+b*b\n\t\ta_=a/norm;b_=-b/norm\n\t\ttx_=(-a*tx - b*ty)/norm\n\t\tty_=(b*tx - a*ty)/norm\n\t\treturn a_,b_,tx_,ty_\ndef calcParameters(src_pos,dst_pos): #l_x,l_y,r_x,r_y\n\t\t[a,b,tx,ty]=alignTransformation(src_pos,dst_pos)\n\t\treturn a,b,tx,ty\n#################### functions end ############################\n\nREF_SIZE=500\nREF_POS=[200,260,287,260,243,332,206,370,281,370] #reference eyes pos,l_x,l_y,r_x,r_y\n#SCALAR=0.5\nSCALAR=1.0\nREF_SIZE = int(REF_SIZE * SCALAR)\nREF_POS = [int(REF_POS[i] * SCALAR) for i in range(0,10) ]\n# REF_SIZE=250 #reference face size width==height\n# REF_POS=[99,99,158,99,98,165,153,165,133,138] #reference eyes pos,l_x,l_y,r_x,r_y\n\ndef face_warp_main(src_img,landmark_pos):\n\t\teyes_pos=[float(landmark_pos[x]) for x in range(0,10)]\n\t\t#print eyes_pos\n\t\tsrc_img_size=src_img.shape\n\t\t[a,b,tx,ty]=calcParameters(eyes_pos,REF_POS)\n\t\ttranM=np.zeros((2,3),dtype=np.float64)\n\t\ttranM[0,0]=a;tranM[1,1]=a\n\t\ttranM[0,1]=-b\n\t\ttranM[1,0]=b\n\t\ttranM[0,2]=tx\n\t\ttranM[1,2]=ty\n\t\twarpdst=np.zeros((REF_SIZE,REF_SIZE,3),dtype=np.uint8)\n\t\tcv2.warpAffine(src_img,tranM,(REF_SIZE,REF_SIZE),warpdst)\n\t\ttranM_new=np.zeros((2,3),dtype=np.float64)\n\t\tnew_landmark_pos=[]\n\t\tfor idx in range(0,5):\n\t\t\t\told_pos_x=float(landmark_pos[idx*2])\n\t\t\t\told_pos_y=float(landmark_pos[idx*2+1])\n\t\t\t\tnew_pos_x=tranM[0,0]*old_pos_x+tranM[0,1]*old_pos_y+tranM[0,2]\n\t\t\t\tnew_pos_y=tranM[1,0]*old_pos_x+tranM[1,1]*old_pos_y+tranM[1,2]\n\t\t\t\tnew_landmark_pos.append(new_pos_x)\n\t\t\t\tnew_landmark_pos.append(new_pos_y)\n\t\treturn warpdst,new_landmark_pos\n\n\n\n\n\n" }, { "alpha_fraction": 0.5442478060722351, "alphanum_fraction": 0.5899705290794373, "avg_line_length": 26.571428298950195, "blob_id": "61238b1704739808ed051b698a40268b3b446700", "content_id": "aba9f96341b74f77ff73f224a2a7120fab83cae4", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1356, "license_type": "permissive", "max_line_length": 60, "num_lines": 49, "path": "/process_video/pro_codatest.py", "repo_name": "xujinchang/HAR-stacked-residual-bidir-LSTMs", "src_encoding": "UTF-8", "text": "import os\nfp = open('test_condafinal','w+')\nfp1 = open('sort_conda_test_final.txt','r')\nlist1 = os.listdir('/localSSD/xjc/codalab_test/')\nlist1.remove('1.py')\nlist1.remove('conda_test_final.txt')\nlist1.remove('train.txt')\n#list1.remove('1469584007_CONTENTMENT')\nlist1 = sorted(list1)\nprint list1\nindex = 0\nframe_list = []\nprint len(list1)\nfor line in fp1.readlines():\n line = line.strip().split(' ')\n key = line[0].split('/')[-2]\n if key == list1[index]:\n frame_list.append(line[0])\n else:\n frame_select=[]\n total = len(frame_list)\n if total < 128: 
print frame_list[0], len(frame_list)\n if total < 256: step = 1\n if total > 256 and total <384: step = 2\n if total > 384: step = 3\n count = 0\n for item in xrange(0,total,1):\n frame_select.append(frame_list[-item])\n count = count + 1\n if count == 128:break\n for item in frame_select:\n fp.write(item+'\\n')\n index = index + 1\n frame_list = []\n\nframe_select=[]\ntotal = len(frame_list)\nprint \"final\",len(frame_list)\n #print total\ncount = 0\nfor item in xrange(0,total,3):\n frame_select.append(frame_list[-item])\n count = count + 1\n if count == 128:break\nfor item in frame_select:\n fp.write(item+'\\n')\nprint (len(frame_select))\nfp.close()\nfp1.close()\n\n\n\n\n\n" }, { "alpha_fraction": 0.5055000185966492, "alphanum_fraction": 0.5304999947547913, "avg_line_length": 28.397058486938477, "blob_id": "72cf2b2aba18edb8e877d714e98c802d96a9070a", "content_id": "6288a480d5610e962666b3b542e195fd5d65bf24", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2000, "license_type": "permissive", "max_line_length": 94, "num_lines": 68, "path": "/read_emotion.py", "repo_name": "xujinchang/HAR-stacked-residual-bidir-LSTMs", "src_encoding": "UTF-8", "text": "import numpy as np\nimport os\n\ndef load_X_my(X_signals_paths):\n X_signals = []\n item = []\n count = 0\n file = open(X_signals_paths, 'r')\n for row in file:\n \tcount = count + 1\n \titem.append([np.array(serie, dtype=np.float32) for serie in row.strip().split(' ')])\n \tif count % 128 == 0:\n \t\tX_signals.append(item)\n \t\titem=[]\n file.close()\n return np.array(X_signals)\n\ndef load_Y_my(X_signals_paths):\n X_signals = []\n file = open(X_signals_paths, 'r')\n for row in file:\n\n \tX_signals.append([np.array(serie, dtype=np.float32) for serie in row.strip().split(' ')])\n\n file.close()\n return np.array(X_signals)\ndef one_hot(y_):\n \"\"\"\n Function to encode output labels from number indexes.\n\n E.g.: [[5], [0], [3]] --> [[0, 0, 0, 0, 0, 1], [1, 0, 0, 0, 0, 0], [0, 0, 0, 1, 0, 0]]\n \"\"\"\n y_ = y_.reshape(len(y_))\n n_values = int(np.max(y_)) + 1\n return np.eye(n_values)[np.array(y_, dtype=np.int32)] # Returns FLOATS\n\n\n # X_signals.append(\n # [np.array(serie, dtype=np.float32) for serie in [\n # row.replace(' ', ' ').strip().split(' ') for row in file\n # ]]\n # )\n # file.close()\n # file = open(X_signals_paths, 'r')\n\n # X_signals.append(\n # [np.array(serie, dtype=np.float32) for serie in [\n # row.replace(' ', ' ').strip().split(' ') for row in file\n # ]]\n # )\n\n # file.close()\n print (np.array(X_signals).shape)\n return np.array(X_signals) #(n,128,4096)\ndef load_X(X_signals_paths):\n X_signals = []\n\n for signal_type_path in X_signals_paths:\n file = open(signal_type_path, 'r')\n # Read dataset from disk, dealing with text files' syntax\n X_signals.append(\n [np.array(serie, dtype=np.float32) for serie in [\n row.replace(' ', ' ').strip().split(' ') for row in file\n ]]\n )\n file.close()\n\n return np.transpose(np.array(X_signals), (1, 2, 0))\n\n" }, { "alpha_fraction": 0.5549896359443665, "alphanum_fraction": 0.5734767317771912, "avg_line_length": 39.45801544189453, "blob_id": "26502b8439d9b5d5ec9a802a9e16c72005038c06", "content_id": "717cfc49885f82f69acfa13cdac60bef4e800c76", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5301, "license_type": "permissive", "max_line_length": 152, "num_lines": 131, "path": "/train.py", "repo_name": 
"xujinchang/HAR-stacked-residual-bidir-LSTMs", "src_encoding": "UTF-8", "text": "\nfrom lstm_architecture import one_hot, run_with_config\n\nimport numpy as np\n\nimport os\n\nfrom read_emotion import load_Y_my, load_X_my\n\nfrom jiangwei import do_pca, load_X_pca\n\n\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"1\"\n#--------------------------------------------\n# Neural net's config.\n#--------------------------------------------\n\nclass Config(object):\n \"\"\"\n define a class to store parameters,\n the input should be feature mat of training and testing\n \"\"\"\n\n def __init__(self, X_train, X_test):\n # Data shaping\n self.train_count = len(X_train) # 451training series\n self.test_data_count = len(X_test) # 36 testing series\n self.n_steps = len(X_train[0]) # 128 time_steps per series\n self.n_classes = 2 # Final output classes\n\n # Training\n self.learning_rate = 0.001\n self.lambda_loss_amount = 0.005\n self.training_epochs = 50\n self.batch_size = 1\n self.clip_gradients = 15.0\n self.gradient_noise_scale = None\n # Dropout is added on inputs and after each stacked layers (but not\n # between residual layers).\n self.keep_prob_for_dropout = 0.85 # **(1/3.0)\n\n # Linear+relu structure\n self.bias_mean = 0.3\n # I would recommend between 0.1 and 1.0 or to change and use a xavier\n # initializer\n self.weights_stddev = 0.2\n\n ########\n # NOTE: I think that if any of the below parameters are changed,\n # the best is to readjust every parameters in the \"Training\" section\n # above to properly compare the architectures only once optimised.\n ########\n\n # LSTM structure\n # Features count is of 9: three 3D sensors features over time\n self.n_inputs = len(X_train[0][0])\n self.n_hidden = 28 # nb of neurons inside the neural network\n # Use bidir in every LSTM cell, or not:\n self.use_bidirectionnal_cells = False\n\n # High-level deep architecture\n self.also_add_dropout_between_stacked_cells = False # True\n # NOTE: values of exactly 1 (int) for those 2 high-level parameters below totally disables them and result in only 1 starting LSTM.\n # self.n_layers_in_highway = 1 # Number of residual connections to the LSTMs (highway-style), this is did for each stacked block (inside them).\n # self.n_stacked_layers = 1 # Stack multiple blocks of residual\n # layers.\n\n\n#--------------------------------------------\n# Dataset-specific constants and functions + loading\n#--------------------------------------------\n\n# Useful Constants\n\n# Those are separate normalised input features for the neural network\n\n\n# Load \"X\" (the neural network's training and testing inputs)\nX_train_signals_paths = \"/localSSD/xjc/codalab_train/train/train_train_fc7_feature_new.fea\"\nX_test_signals_paths = \"/localSSD/xjc/codalab_train/train/train_valid_fc7_feature_new.fea\"\ny_train_path = \"/localSSD/xjc/codalab_train/train/train_y_label_2.fea\"\ny_test_path = \"/localSSD/xjc/codalab_train/train/valid_y_label_2.fea\"\nX_train = load_X_pca(X_train_signals_paths)\nX_test = load_X_pca(X_test_signals_paths)\nX_train = do_pca(X_train)\nX_test = do_pca(X_test)\ny_train = one_hot(load_Y_my(y_train_path))\ny_test = one_hot(load_Y_my(y_test_path))\n#--------------------------------------------\n# Training (maybe multiple) experiment(s)\n#--------------------------------------------\n\nn_layers_in_highway = 0\nn_stacked_layers = 3\ntrial_name = \"{}x{}\".format(n_layers_in_highway, n_stacked_layers)\n\nfor learning_rate in [0.0001]: # [0.01, 0.007, 0.001, 0.0007, 0.0001]:\n for lambda_loss_amount in [0.005]:\n 
for clip_gradients in [15.0]:\n print \"learning_rate: {}\".format(learning_rate)\n print \"lambda_loss_amount: {}\".format(lambda_loss_amount)\n print \"\"\n\n class EditedConfig(Config):\n def __init__(self, X, Y):\n super(EditedConfig, self).__init__(X, Y)\n\n # Edit only some parameters:\n self.learning_rate = learning_rate\n self.lambda_loss_amount = lambda_loss_amount\n self.clip_gradients = None\n # Architecture params:\n self.n_layers_in_highway = n_layers_in_highway\n self.n_stacked_layers = n_stacked_layers\n\n # # Useful catch upon looping (e.g.: not enough memory)\n # try:\n # accuracy_out, best_accuracy = run_with_config(EditedConfig)\n # except:\n # accuracy_out, best_accuracy = -1, -1\n accuracy_out, best_accuracy, f1_score_out, best_f1_score = (\n run_with_config(EditedConfig, X_train, y_train, X_test, y_test, learning_rate)\n )\n print (accuracy_out, best_accuracy, f1_score_out, best_f1_score)\n\n with open('{}_result_emotion_2.txt'.format(trial_name), 'a') as f:\n f.write(str(learning_rate) + ' \\t' + str(lambda_loss_amount) + ' \\t' + str(clip_gradients) + ' \\t' + str(\n accuracy_out) + ' \\t' + str(best_accuracy) + ' \\t' + str(f1_score_out) + ' \\t' + str(best_f1_score) + '\\n\\n')\n\n print \"________________________________________________________\"\n print \"\"\nprint \"Done.\"\n" }, { "alpha_fraction": 0.6013333201408386, "alphanum_fraction": 0.6271111369132996, "avg_line_length": 32.07352828979492, "blob_id": "db6e62ceb0e5f74307054c9c21bb5bfcae71bb61", "content_id": "d1c7d7738c9db65a90502c50e8246d21a8bdd5c0", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2250, "license_type": "permissive", "max_line_length": 102, "num_lines": 68, "path": "/process_video/crop.py", "repo_name": "xujinchang/HAR-stacked-residual-bidir-LSTMs", "src_encoding": "UTF-8", "text": "import numpy as np\nimport cv2\nimport cPickle\nimport sys\nimport time\nimport copy\nfrom face_warp import *\nfrom multiprocessing import Pool\nfrom reco_DA import crop_image_with_pad\n\npoch_size=40\ndef rectToSquare(rect):\n center_x = rect[0] + rect[2]/2\n center_y = rect[1] + rect[3]/2\n height=rect[2]\n width=rect[2]\n center_y += int(height*0.08)\n return [center_x - width / 2,center_y - height / 2, width, height]\ndef rectToSquare1(rect):\n height=rect[2]\n width=rect[3]\n flag=0 if height>width else 1\n if flag==0:\n diff=height-width\n return [rect[0],rect[1]-diff/2,height,height]\n else:\n diff=width-height\n return [rect[0]-diff/2,rect[1],width,width]\n\n\ndef write_rect(img_list):\n for idx,line in enumerate(img_list):\n filename = line.split()[0]\n rect =[int(float(la)) for la in line.split()[1:5]]\n img = cv2.imread(filename)\n rect2=rectToSquare1(rect)\n #newname='/home/xujinchang/share/AGG/Liveness/detection/dafeng_rect/'+filename.split('/')[-1]\n newname='./'+'15_'+filename.split('/')[-1]\n img_crop=crop_image_with_pad(img,rect2)\n cv2.imwrite(newname,img_crop)\n if idx%poch_size==0:\n print idx,\"finished\"\n\ndef write_landmark(img_list):\n for idx,line in enumerate(img_list):\n filename = line.split()[0]\n print filename\n landmark =[int(float(la)) for la in line.split()[1:5]]\n img = cv2.imread(filename)\n landmarks=line.split()[1:11]\n imgs,lands = face_warp_main(img,landmarks)\n img_crop = crop_image(imgs,lands,2,0)\n\n #newname='/localSSD/xjc/liveness/align/xiong_true/'+filename.split('/')[-1]\n #newname='./'+filename.split('/')[-1]\n #print newname\n cv2.imwrite(filename,img_crop)\n\nif __name__==\"__main__\":\n 
image_lists=[line.strip() for line in open(sys.argv[1])]\n image_lists_length=len(image_lists)\n task_lists=[image_lists[i*poch_size:(i+1)*poch_size] for i in range(image_lists_length/poch_size)]\n if not image_lists_length%poch_size==0:\n task_lists.append(image_lists[(image_lists_length/poch_size)*poch_size:])\n pool=Pool(20)\n pool.map(write_landmark,tuple(task_lists))\n pool.close()\n pool.join()\n\n" }, { "alpha_fraction": 0.6153846383094788, "alphanum_fraction": 0.6509221792221069, "avg_line_length": 25.77108383178711, "blob_id": "4fccdb7b2c82281032e6733f1cfb7fc7a9dfe9b0", "content_id": "a725f06ef2a8b37bd313f9dd32d8ee53c23d4f56", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2223, "license_type": "permissive", "max_line_length": 103, "num_lines": 83, "path": "/process_video/extract_emotion.py", "repo_name": "xujinchang/HAR-stacked-residual-bidir-LSTMs", "src_encoding": "UTF-8", "text": "#coding:utf-8\nimport numpy as np\nimport time\nimport os\nimport json\nimport sys\nimport socket\nimport copy\nimport math\nimport matplotlib.pyplot as plt\nimport cv2\nsys.path.insert(0,'./python')\nimport caffe\n\ncaffe.set_mode_gpu()\ncaffe.set_device(2)\n\nMODEL_DEF = '/home/xujinchang/caffe-blur-pose/models/vgg/deploy_vgg_fer.prototxt'\nMODEL_PATH = '../models/vgg_19_finetue_fer2013_iter_30000.caffemodel'\n\nmean = np.array((104, 117, 123), dtype=np.float32)\nSIZE = 250\n\ndef predict(the_net,image):\n inputs = []\n if not os.path.exists(image):\n raise Exception(\"Image path not exist\")\n return\n try:\n tmp_input = cv2.imread(image)\n tmp_input = cv2.resize(tmp_input,(SIZE,SIZE))\n tmp_input = tmp_input[13:13+224,13:13+224]\n tmp_input = tmp_input.transpose((2, 0, 1))\n tmp_input = np.require(tmp_input, dtype=np.float32)\n except Exception as e:\n #raise Exception(\"Image damaged or illegal file format\")\n return None\n the_net.blobs['data'].reshape(1, *tmp_input.shape)\n the_net.reshape()\n the_net.blobs['data'].data[...] 
= tmp_input\n    the_net.forward()\n    scores = copy.deepcopy(the_net.blobs['fc7'].data)\n    return scores\n\nif __name__==\"__main__\":\n    f = open(\"/home/xujinchang/share/AGG/Liveness/detection/coda_lab/label_frame/frame_label_train\",\"rb\")\n    fp = open(\"train_train_fc7_feature_new.fea\",\"w\")\n    fp2 = open(\"ex2.fea\",\"w\")\n    net = caffe.Net(MODEL_DEF, MODEL_PATH, caffe.TEST)\n    #score_map = dict()\n\n    start_time = time.time()\n    X_features=[]\n    y_label=[]\n    count = 0\n    for line in f.readlines():\n        line = line.strip().split(\" \")\n        print line[0]\n        fea = predict(net,line[0])\n        fea = list(np.reshape(fea, (fea.shape[1], fea.shape[0])))\n        feature = np.require(fea)# (4096,1)\n        X_features.append(feature)\n        count = count + 1\n        if count % 128 == 0:\n            y_label.append(line[1])\n\n    print len(X_features)\n    print len(y_label)\n    for item in X_features:\n        for idx in range(item.shape[0]):\n            fp.write(str(item[idx][0])+' ')\n        fp.write('\\n')\n    #print X_features[10]\n    for item in y_label:\n        fp2.write(str(item)+'\\n')\n    #print X_features[-1]\n\n    f.close()\n    fp.close()\n    fp2.close()\n    #print X_features[0]\n    end_time = time.time()\n    forward_time = end_time - start_time\n\n" }, { "alpha_fraction": 0.746442437171936, "alphanum_fraction": 0.7716688513755798, "avg_line_length": 34.953487396240234, "blob_id": "e746f0f54c7e03fd540bfb0a3bd3f618c771160e", "content_id": "85a00c7c2fa757eaf88909f63404c2bdc7d285c0", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1546, "license_type": "permissive", "max_line_length": 239, "num_lines": 43, "path": "/README.md", "repo_name": "xujinchang/HAR-stacked-residual-bidir-LSTMs", "src_encoding": "UTF-8", "text": "# ChaLearn LAP Real Versus Fake Expressed Emotion Challenge @ICCV 2017\n\n# Data Process\n\n\nFirstly, the videos are preprocessed with the code in process_video. extract_video.py is used to extract the frames from the videos.\n\nThen each image is resized using img_resize.m with a ratio of 1/2.\n\nAfter that, we use a face detection SDK to detect the face_rect and landmarks in each image; the results are in the testcoda_land file and the testcoda_rect file. We use the landmark points to align the faces and crop all the faces using crop.py.\n\nFinally, we use pro_codatest.py and sort_frame.py to get 128 frames per video.\n\n# Train the model\n\n\nFirstly, we use a vgg16 CNN pretrained on fer2013.\n\nThen, the vgg16 is treated as a feature extractor and its 4096-dimensional fc7 features are used.\n\nWe use extract_emotion.py to extract features for the 128 frames of each video.\nBefore training the lstm on all the features, we apply PCA to reduce the final feature dimension to 1024.\n\nWe use train.py to train the lstm network using tensorflow 0.11.0rc0.\n\n\n# Predict the results\n\n\ntest.py is used to load the model and predict the test labels.\n\nAnd pro_result.py is used to produce the pkl files.\n\n\n# Just for Test\n\nFirstly, I have extracted the features for the test set. 
You just need to download the test_feature.zip.\n\nThen, change the X_test_path to the feature path.\n\nAfter that, you can just run test.py if you have installed the tensorflow 0.11.0rc0\n\nFinally, if you want to produce the test_prediction.pkl, you can run the pro_result.py.\n" }, { "alpha_fraction": 0.6497372984886169, "alphanum_fraction": 0.6690017580986023, "avg_line_length": 18.65517234802246, "blob_id": "f214cdb6386ab85ae206d5e9a218670a8c467c4c", "content_id": "a882d8c6e47417c94d4d114f8292fb14df955d2d", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 571, "license_type": "permissive", "max_line_length": 51, "num_lines": 29, "path": "/pro_result.py", "repo_name": "xujinchang/HAR-stacked-residual-bidir-LSTMs", "src_encoding": "UTF-8", "text": "import os\nimport pickle\nimport pprint\npath = '/home/tmp_data_dir/zhuzezhou/codalab/CTest'\nlist1 = os.listdir(path)\nlist1 = sorted(list1)\noutput = open('test_prediction.pkl','wb')\nfp = open('test_label_result','r')\ndic = {}\ncount = 0\nfor line in fp.readlines():\n\tline = line.strip().split('\\n')\n\tif int(line[0]) == 0:\n\t\tdic[list1[count]] = 'fake'\n\telse:\n\t\tdic[list1[count]] = 'true'\n\tcount = count + 1\npickle.dump(dic, output)\n\noutput.close()\nfp.close()\n\n\npkl_file = open('test_prediction.pkl', 'rb')\n\ndata1 = pickle.load(pkl_file)\npprint.pprint(data1)\n\npkl_file.close()\n\n" }, { "alpha_fraction": 0.6443940997123718, "alphanum_fraction": 0.6613816618919373, "avg_line_length": 19.534883499145508, "blob_id": "50545f4f6cf9395c174c4d8298939e723b07a3e3", "content_id": "96cdd49452927fe710c262d74ea214a5a6fb69ef", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 883, "license_type": "permissive", "max_line_length": 94, "num_lines": 43, "path": "/jiangwei.py", "repo_name": "xujinchang/HAR-stacked-residual-bidir-LSTMs", "src_encoding": "UTF-8", "text": "from sklearn.decomposition import PCA\nimport numpy as np\n\n\ndef load_X_pca(X_signals_paths):\n X_signals = []\n file = open(X_signals_paths, 'r')\n for row in file:\n \tX_signals.append([np.array(serie, dtype=np.float32) for serie in row.strip().split(' ')])\n \n file.close()\n return np.array(X_signals)\n\ndef do_pca(X_data):\n\tpca=PCA(n_components=1024)\n\tnewX=pca.fit_transform(X_data)\n\tnew_feature = reshape_pca(newX)\n\treturn new_feature\n\n\ndef reshape_pca(newX):\n\tcount = 0\n\titem = []\n\tfeature = []\n\tfor index in xrange(0,newX.shape[0]):\n\t\tcount = count + 1\n\t\titem.append(newX[index])\n\t\t\n\t\tif count % 128 == 0:\n\t\t\tfeature.append(item)\n\t\t\titem = []\n\treturn np.array(feature)\n\n\n\n\n\n# X_train_signals_paths = \"/home/xujinchang/caffe-blur-pose/valid_fc7_feature_new.fea\"\n\n# X_data = load_X_my(X_train_signals_paths)\n# new_feature = do_pca(X_data)\n\n# print new_feature.shape\n" }, { "alpha_fraction": 0.5797849297523499, "alphanum_fraction": 0.5899556875228882, "avg_line_length": 40.851322174072266, "blob_id": "30b77978c0981455a79cb916335d643c8b5cef33", "content_id": "4770df71348f6af9e0328570f5a5a80da1e73ff7", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 20549, "license_type": "permissive", "max_line_length": 152, "num_lines": 491, "path": "/lstm_architecture.py", "repo_name": "xujinchang/HAR-stacked-residual-bidir-LSTMs", "src_encoding": "UTF-8", "text": "__author__ = 'gchevalier'\n\nimport tensorflow as tf\nfrom sklearn 
import metrics\nfrom sklearn.utils import shuffle\nimport numpy as np\nimport os\n#os.environ[\"CUDA_VISIBLE_DEVICES\"] = \"2,3\"\n\ndef one_hot(y):\n \"\"\"convert label from dense to one hot\n argument:\n label: ndarray dense label ,shape: [sample_num,1]\n return:\n one_hot_label: ndarray one hot, shape: [sample_num,n_class]\n \"\"\"\n # e.g.: [[5], [0], [3]] --> [[0, 0, 0, 0, 0, 1], [1, 0, 0, 0, 0, 0], [0, 0, 0, 1, 0, 0]]\n\n y = y.reshape(len(y))\n n_values = int(np.max(y)) + 1\n return np.eye(n_values)[np.array(y, dtype=np.int32)] # Returns FLOATS\n\n\n\n\ndef batch_norm(input_tensor, config, i):\n # Implementing batch normalisation: this is used out of the residual layers\n # to normalise those output neurons by mean and standard deviation.\n\n if config.n_layers_in_highway == 0:\n # There is no residual layers, no need for batch_norm:\n return input_tensor\n\n with tf.variable_scope(\"batch_norm\") as scope:\n if i != 0:\n # Do not create extra variables for each time step\n scope.reuse_variables()\n\n # Mean and variance normalisation simply crunched over all axes\n axes = list(range(len(input_tensor.get_shape())))\n\n mean, variance = tf.nn.moments(input_tensor, axes=axes, shift=None, name=None, keep_dims=False)\n stdev = tf.sqrt(variance+0.001)\n\n # Rescaling\n bn = input_tensor - mean\n bn /= stdev\n # Learnable extra rescaling\n\n # tf.get_variable(\"relu_fc_weights\", initializer=tf.random_normal(mean=0.0, stddev=0.0)\n bn *= tf.get_variable(\"a_noreg\", initializer=tf.random_normal([1], mean=0.5, stddev=0.0))\n bn += tf.get_variable(\"b_noreg\", initializer=tf.random_normal([1], mean=0.0, stddev=0.0))\n # bn *= tf.Variable(0.5, name=(scope.name + \"/a_noreg\"))\n # bn += tf.Variable(0.0, name=(scope.name + \"/b_noreg\"))\n\n return bn\n\ndef relu_fc(input_2D_tensor_list, features_len, new_features_len, config):\n \"\"\"make a relu fully-connected layer, mainly change the shape of tensor\n both input and output is a list of tensor\n argument:\n input_2D_tensor_list: list shape is [batch_size,feature_num]\n features_len: int the initial features length of input_2D_tensor\n new_feature_len: int the final features length of output_2D_tensor\n config: Config used for weights initializers\n return:\n output_2D_tensor_list lit shape is [batch_size,new_feature_len]\n \"\"\"\n\n W = tf.get_variable(\n \"relu_fc_weights\",\n initializer=tf.random_normal(\n [features_len, new_features_len],\n mean=0.0,\n stddev=float(config.weights_stddev)\n )\n )\n b = tf.get_variable(\n \"relu_fc_biases_noreg\",\n initializer=tf.random_normal(\n [new_features_len],\n mean=float(config.bias_mean),\n stddev=float(config.weights_stddev)\n )\n )\n\n # intra-timestep multiplication:\n output_2D_tensor_list = [\n tf.nn.relu(tf.matmul(input_2D_tensor, W) + b)\n for input_2D_tensor in input_2D_tensor_list\n ]\n\n return output_2D_tensor_list\n\n\ndef single_LSTM_cell(input_hidden_tensor, n_outputs):\n \"\"\" define the basic LSTM layer\n argument:\n input_hidden_tensor: list a list of tensor,\n shape: time_steps*[batch_size,n_inputs]\n n_outputs: int num of LSTM layer output\n return:\n outputs: list a time_steps list of tensor,\n shape: time_steps*[batch_size,n_outputs]\n \"\"\"\n with tf.variable_scope(\"lstm_cell\"):\n lstm_cell = tf.nn.rnn_cell.BasicLSTMCell(n_outputs, state_is_tuple=True, forget_bias=0.999)\n outputs, _ = tf.nn.rnn(lstm_cell, input_hidden_tensor, dtype=tf.float32)\n return outputs\n\n\ndef bi_LSTM_cell(input_hidden_tensor, n_inputs, n_outputs, config):\n \"\"\"build bi-LSTM, 
concatenating the two directions in an inner manner.\n argument:\n input_hidden_tensor: list a time_steps series of tensor, shape: [sample_num, n_inputs]\n n_inputs: int units of input tensor\n n_outputs: int units of output tensor, each bi-LSTM will have half those internal units\n config: Config used for the relu_fc\n return:\n layer_hidden_outputs: list a time_steps series of tensor, shape: [sample_num, n_outputs]\n \"\"\"\n n_outputs = int(n_outputs/2)\n\n print \"bidir:\"\n\n with tf.variable_scope('pass_forward') as scope2:\n hidden_forward = relu_fc(input_hidden_tensor, n_inputs, n_outputs, config)\n forward = single_LSTM_cell(hidden_forward, n_outputs)\n\n print (len(hidden_forward), str(hidden_forward[0].get_shape()))\n\n # Backward pass is as simple as surrounding the cell with a double inversion:\n with tf.variable_scope('pass_backward') as scope2:\n hidden_backward = relu_fc(input_hidden_tensor, n_inputs, n_outputs, config)\n backward = list(reversed(single_LSTM_cell(list(reversed(hidden_backward)), n_outputs)))\n\n with tf.variable_scope('bidir_concat') as scope:\n # Simply concatenating cells' outputs at each timesteps on the innermost\n # dimension, like if the two cells acted as one cell\n # with twice the n_hidden size:\n layer_hidden_outputs = [\n tf.concat(len(f.get_shape()) - 1, [f, b])\n for f, b in zip(forward, backward)]\n\n return layer_hidden_outputs\n\n\ndef residual_bidirectional_LSTM_layers(input_hidden_tensor, n_input, n_output, layer_level, config, keep_prob_for_dropout):\n \"\"\"This architecture is only enabled if \"config.n_layers_in_highway\" has a\n value only greater than int(0). The arguments are same than for bi_LSTM_cell.\n arguments:\n input_hidden_tensor: list a time_steps series of tensor, shape: [sample_num, n_inputs]\n n_inputs: int units of input tensor\n n_outputs: int units of output tensor, each bi-LSTM will have half those internal units\n config: Config used for determining if there are residual connections and if yes, their number and with some batch_norm.\n return:\n layer_hidden_outputs: list a time_steps series of tensor, shape: [sample_num, n_outputs]\n \"\"\"\n with tf.variable_scope('layer_{}'.format(layer_level)) as scope:\n\n if config.use_bidirectionnal_cells:\n get_lstm = lambda input_tensor: bi_LSTM_cell(input_tensor, n_input, n_output, config)\n else:\n get_lstm = lambda input_tensor: single_LSTM_cell(relu_fc(input_tensor, n_input, n_output, config), n_output)\n def add_highway_redisual(layer, residual_minilayer):\n return [a + b for a, b in zip(layer, residual_minilayer)]\n\n hidden_LSTM_layer = get_lstm(input_hidden_tensor)\n # Adding K new (residual bidir) connections to this first layer:\n for i in range(config.n_layers_in_highway - 1):\n with tf.variable_scope('LSTM_residual_{}'.format(i)) as scope2:\n hidden_LSTM_layer = add_highway_redisual(\n hidden_LSTM_layer,\n get_lstm(input_hidden_tensor)\n )\n\n if config.also_add_dropout_between_stacked_cells:\n hidden_LSTM_layer = [tf.nn.dropout(out, keep_prob_for_dropout) for out in hidden_LSTM_layer]\n\n return [batch_norm(out, config, i) for i, out in enumerate(hidden_LSTM_layer)]\n\n\ndef LSTM_network(feature_mat, config, keep_prob_for_dropout):\n \"\"\"model a LSTM Network,\n it stacks 2 LSTM layers, each layer has n_hidden=32 cells\n and 1 output layer, it is a full connet layer\n argument:\n feature_mat: ndarray fature matrix, shape=[batch_size,time_steps,n_inputs]\n config: class containing config of network\n return:\n : ndarray output shape [batch_size, n_classes]\n 
\"\"\"\n\n with tf.variable_scope('LSTM_network') as scope: # TensorFlow graph naming\n\n feature_mat = tf.nn.dropout(feature_mat, keep_prob_for_dropout)\n\n # Exchange dim 1 and dim 0\n feature_mat = tf.transpose(feature_mat, [1, 0, 2])\n print feature_mat.get_shape()\n # New feature_mat's shape: [time_steps, batch_size, n_inputs]\n\n # Temporarily crush the feature_mat's dimensions\n feature_mat = tf.reshape(feature_mat, [-1, config.n_inputs])\n print feature_mat.get_shape()\n # New feature_mat's shape: [time_steps*batch_size, n_inputs]\n\n # Split the series because the rnn cell needs time_steps features, each of shape:\n hidden = tf.split(0, config.n_steps, feature_mat)\n print (len(hidden), str(hidden[0].get_shape()))\n # New shape: a list of lenght \"time_step\" containing tensors of shape [batch_size, n_hidden]\n\n # Stacking LSTM cells, at least one is stacked:\n print \"\\nCreating hidden #1:\"\n hidden = residual_bidirectional_LSTM_layers(hidden, config.n_inputs, config.n_hidden, 1, config, keep_prob_for_dropout)\n print (len(hidden), str(hidden[0].get_shape()))\n\n for stacked_hidden_index in range(config.n_stacked_layers - 1):\n # If the config permits it, we stack more lstm cells:\n print \"\\nCreating hidden #{}:\".format(stacked_hidden_index+2)\n hidden = residual_bidirectional_LSTM_layers(hidden, config.n_hidden, config.n_hidden, stacked_hidden_index+2, config, keep_prob_for_dropout)\n print (len(hidden), str(hidden[0].get_shape()))\n\n print \"\"\n\n # Final fully-connected activation logits\n # Get the last output tensor of the inner loop output series, of shape [batch_size, n_classes]\n last_hidden = tf.nn.dropout(hidden[-1], keep_prob_for_dropout)\n last_logits = relu_fc(\n [last_hidden],\n config.n_hidden, config.n_classes, config\n )[0]\n return last_logits\n\n\ndef run_with_config(Config, X_train, y_train, X_test, y_test):\n tf.reset_default_graph() # To enable to run multiple things in a loop\n\n #-----------------------------------\n # Define parameters for model\n #-----------------------------------\n config = Config(X_train, X_test)\n print(\"Some useful info to get an insight on dataset's shape and normalisation:\")\n print(\"features shape, labels shape, each features mean, each features standard deviation\")\n print(X_test.shape, y_test.shape,\n np.mean(X_test), np.std(X_test))\n print(\"the dataset is therefore properly normalised, as expected.\")\n\n #------------------------------------------------------\n # Let's get serious and build the neural network\n #------------------------------------------------------\n #with tf.device(\"/cpu:0\"): # Remove this line to use GPU. 
If you have a too small GPU, it crashes.\n X = tf.placeholder(tf.float32, [None, config.n_steps, config.n_inputs], name=\"X\")\n Y = tf.placeholder(tf.float32, [None, config.n_classes], name=\"Y\")\n\n # is_train for dropout control:\n is_train = tf.placeholder(tf.bool, name=\"is_train\")\n keep_prob_for_dropout = tf.cond(is_train,\n lambda: tf.constant(\n config.keep_prob_for_dropout,\n name=\"keep_prob_for_dropout\"\n ),\n lambda: tf.constant(\n 1.0,\n name=\"keep_prob_for_dropout\"\n )\n )\n\n pred_y = LSTM_network(X, config, keep_prob_for_dropout)\n\n # Loss, optimizer, evaluation\n\n # Softmax loss with L2 and L1 layer-wise regularisation\n print \"Unregularised variables:\"\n for unreg in [tf_var.name for tf_var in tf.trainable_variables() if (\"noreg\" in tf_var.name or \"Bias\" in tf_var.name)]:\n print unreg\n l2 = config.lambda_loss_amount * sum(\n tf.nn.l2_loss(tf_var)\n for tf_var in tf.trainable_variables()\n if not (\"noreg\" in tf_var.name or \"Bias\" in tf_var.name)\n )\n # first_weights = [w for w in tf.all_variables() if w.name == 'LSTM_network/layer_1/pass_forward/relu_fc_weights:0'][0]\n # l1 = config.lambda_loss_amount * tf.reduce_mean(tf.abs(first_weights))\n loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(pred_y, Y)) + l2 # + l1\n\n # Gradient clipping Adam optimizer with gradient noise\n optimize = tf.contrib.layers.optimize_loss(\n loss,\n global_step=tf.Variable(0),\n learning_rate=config.learning_rate,\n optimizer=tf.train.AdamOptimizer(learning_rate=config.learning_rate),\n clip_gradients=config.clip_gradients,\n gradient_noise_scale=config.gradient_noise_scale\n )\n\n correct_pred = tf.equal(tf.argmax(pred_y, 1), tf.argmax(Y, 1))\n accuracy = tf.reduce_mean(tf.cast(correct_pred, dtype=tf.float32))\n\n #--------------------------------------------\n # Hooray, now train the neural network\n #--------------------------------------------\n # Note that log_device_placement can be turned of for less console spam.\n\n\n sessconfig = tf.ConfigProto(log_device_placement=False)\n sessconfig.gpu_options.allow_growth=True\n saver = tf.train.Saver(max_to_keep=None)\n with tf.Session(config=sessconfig) as sess:\n tf.initialize_all_variables().run()\n #model_pth = './checkpoint_1'+'/model'+str(9900)\n #saver.restore(sess, model_pth)\n\n best_accuracy = (0.0, \"iter: -1\")\n best_f1_score = (0.0, \"iter: -1\")\n\n # Start training for each batch and loop epochs\n\n worst_batches = []\n\n for i in range(config.training_epochs):\n\n # Loop batches for an epoch:\n shuffled_X, shuffled_y = shuffle(X_train, y_train, random_state=i*42)\n for start, end in zip(range(0, config.train_count, config.batch_size),\n range(config.batch_size, config.train_count + 1, config.batch_size)):\n\n _, train_acc, train_loss, train_pred = sess.run(\n [optimize, accuracy, loss, pred_y],\n feed_dict={\n X: shuffled_X[start:end],\n Y: shuffled_y[start:end],\n is_train: True\n }\n )\n\n worst_batches.append(\n (train_loss, shuffled_X[start:end], shuffled_y[start:end])\n )\n worst_batches = list(sorted(worst_batches))[-5:] # Keep 5 poorest\n\n # Train F1 score is not on boosting\n train_f1_score = metrics.f1_score(\n shuffled_y[start:end].argmax(1), train_pred.argmax(1), average=\"weighted\"\n )\n\n # Retrain on top worst batches of this epoch (boosting):\n # a.k.a. 
\"focus on the hardest exercises while training\":\n for _, x_, y_ in worst_batches:\n\n _, train_acc, train_loss, train_pred = sess.run(\n [optimize, accuracy, loss, pred_y],\n feed_dict={\n X: x_,\n Y: y_,\n is_train: True\n }\n )\n\n # Test completely at the end of every epoch:\n # Calculate accuracy and F1 score\n\n if i % 10 == 0:\n os.makedirs('./checkpoint/checkpoint'+str(i))\n saver.save(sess, './checkpoint/checkpoint'+str(i) + '/model' + str(i))\n\n pred_out, accuracy_out, loss_out = sess.run(\n [pred_y, accuracy, loss],\n feed_dict={\n X: X_test,\n Y: y_test,\n is_train: False\n }\n )\n\n # \"y_test.argmax(1)\": could be optimised by being computed once...\n f1_score_out = metrics.f1_score(\n y_test.argmax(1), pred_out.argmax(1), average=\"weighted\"\n )\n\n print (\n \"iter: {}, \".format(i) + \\\n \"train loss: {}, \".format(train_loss) + \\\n \"train accuracy: {}, \".format(train_acc) + \\\n \"train F1-score: {}, \".format(train_f1_score) + \\\n \"test loss: {}, \".format(loss_out) + \\\n \"test accuracy: {}, \".format(accuracy_out) + \\\n \"test F1-score: {}\".format(f1_score_out)\n )\n\n best_accuracy = max(best_accuracy, (accuracy_out, \"iter: {}\".format(i)))\n best_f1_score = max(best_f1_score, (f1_score_out, \"iter: {}\".format(i)))\n\n print (\"\")\n print (\"final test accuracy: {}\".format(accuracy_out))\n print (\"best epoch's test accuracy: {}\".format(best_accuracy))\n print (\"final F1 score: {}\".format(f1_score_out))\n print (\"best epoch's F1 score: {}\".format(best_f1_score))\n print (\"\")\n\n\n # returning both final and bests accuracies and f1 scores.\n return accuracy_out, best_accuracy, f1_score_out, best_f1_score\n\n\n\ndef test_with_config(Config, X_test):\n tf.reset_default_graph() # To enable to run multiple things in a loop\n\n #-----------------------------------\n # Define parameters for model\n #-----------------------------------\n # config = Config(X_train, X_test)\n # print(\"Some useful info to get an insight on dataset's shape and normalisation:\")\n # print(\"features shape, labels shape, each features mean, each features standard deviation\")\n # print(X_test.shape, y_test.shape,\n # np.mean(X_test), np.std(X_test))\n # print(\"the dataset is therefore properly normalised, as expected.\")\n\n #------------------------------------------------------\n # Let's get serious and build the neural network\n #------------------------------------------------------\n #with tf.device(\"/cpu:0\"): # Remove this line to use GPU. 
If your GPU is too small, it crashes.\n config = Config(X_test, X_test)\n X = tf.placeholder(tf.float32, [None, config.n_steps, config.n_inputs], name=\"X\")\n Y = tf.placeholder(tf.float32, [None, config.n_classes], name=\"Y\")\n\n # is_train for dropout control:\n is_train = tf.placeholder(tf.bool, name=\"is_train\")\n keep_prob_for_dropout = tf.cond(is_train,\n lambda: tf.constant(\n config.keep_prob_for_dropout,\n name=\"keep_prob_for_dropout\"\n ),\n lambda: tf.constant(\n 1.0,\n name=\"keep_prob_for_dropout\"\n )\n )\n\n pred_y = LSTM_network(X, config, keep_prob_for_dropout)\n\n # Loss, optimizer, evaluation\n\n # Softmax loss with L2 and L1 layer-wise regularisation\n print \"Unregularised variables:\"\n for unreg in [tf_var.name for tf_var in tf.trainable_variables() if (\"noreg\" in tf_var.name or \"Bias\" in tf_var.name)]:\n print unreg\n l2 = config.lambda_loss_amount * sum(\n tf.nn.l2_loss(tf_var)\n for tf_var in tf.trainable_variables()\n if not (\"noreg\" in tf_var.name or \"Bias\" in tf_var.name)\n )\n # first_weights = [w for w in tf.all_variables() if w.name == 'LSTM_network/layer_1/pass_forward/relu_fc_weights:0'][0]\n # l1 = config.lambda_loss_amount * tf.reduce_mean(tf.abs(first_weights))\n loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(pred_y, Y)) + l2 # + l1\n\n # Gradient clipping Adam optimizer with gradient noise\n optimize = tf.contrib.layers.optimize_loss(\n loss,\n global_step=tf.Variable(0),\n learning_rate=config.learning_rate,\n optimizer=tf.train.AdamOptimizer(learning_rate=config.learning_rate),\n clip_gradients=config.clip_gradients,\n gradient_noise_scale=config.gradient_noise_scale\n )\n\n correct_pred = tf.equal(tf.argmax(pred_y, 1), tf.argmax(Y, 1))\n accuracy = tf.reduce_mean(tf.cast(correct_pred, dtype=tf.float32))\n\n #--------------------------------------------\n # Hooray, now train the neural network\n #--------------------------------------------\n # Note that log_device_placement can be turned off for less console spam.\n\n sessconfig = tf.ConfigProto(log_device_placement=False)\n saver = tf.train.Saver()\n model_pth = './checkpoint_020.0001/checkpoint37'+'/model37'\n #model_pth = './checkpoint2/checkpoint5600/model5600'\n with tf.Session(config=sessconfig) as sess:\n saver.restore(sess, model_pth)\n #tf.initialize_all_variables().run()\n pred_out = sess.run(pred_y,\n feed_dict={\n X: X_test,\n is_train: False\n }\n )\n # Start training for each batch and loop epochs\n\n # returning both final and best accuracies and f1 scores.\n return pred_out\n" }, { "alpha_fraction": 0.5961538553237915, "alphanum_fraction": 0.6307692527770996, "avg_line_length": 23.636363983154297, "blob_id": "5ec0446eba12399e1ba0b80e8d3b6a699f660d7b", "content_id": "355dcc9685d9579155b8f46e54b72822f7ae904b", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 260, "license_type": "permissive", "max_line_length": 36, "num_lines": 11, "path": "/process_video/sort_frame.py", "repo_name": "xujinchang/HAR-stacked-residual-bidir-LSTMs", "src_encoding": "UTF-8", "text": "fp = open('test_condafinal','r')\nfp1 = open('sort_testcodafinal','w')\nlist1 = []\nfor line in fp.readlines():\n    line = line.strip().split('\\n')\n    list1.append(line[0])\nlist1 = sorted(list1)\nfor item in list1:\n    fp1.write(item+'\\n')\nfp.close()\nfp1.close()\n" }, { "alpha_fraction": 0.6167634725570679, "alphanum_fraction": 0.6331734657287598, "avg_line_length": 36.349056243896484,
"blob_id": "21a46e8e5743769a6c8b821c7d8678990295d9a2", "content_id": "d7c65af67c09ccd8f1c9e8509947ecfe49dc3553", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3961, "license_type": "permissive", "max_line_length": 150, "num_lines": 106, "path": "/predict.py", "repo_name": "xujinchang/HAR-stacked-residual-bidir-LSTMs", "src_encoding": "UTF-8", "text": "\nfrom lstm_architecture import one_hot, run_with_config, test_with_config\n\nimport numpy as np\n\nimport os\n\nfrom read_emotion import load_Y_my, load_X_my\n\nfrom jiangwei import do_pca, load_X_pca\n\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"1\"\n#--------------------------------------------\n# Neural net's config.\n#--------------------------------------------\n\nclass Config(object):\n    \"\"\"\n    define a class to store parameters,\n    the input should be feature mat of training and testing\n    \"\"\"\n\n    def __init__(self, X_train, X_test):\n        # Data shaping\n        self.train_count = len(X_train)  # 451 training series\n        self.test_data_count = len(X_test)  # 36 testing series\n        self.n_steps = len(X_train[0])  # 128 time_steps per series\n        self.n_classes = 2  # Final output classes\n\n        # Training\n        self.learning_rate = 0.001\n        self.lambda_loss_amount = 0.005\n        self.training_epochs = 10000\n        self.batch_size = 30\n        self.clip_gradients = 15.0\n        self.gradient_noise_scale = None\n        # Dropout is added on inputs and after each stacked layer (but not\n        # between residual layers).\n        self.keep_prob_for_dropout = 0.5  # **(1/3.0)0.85\n\n        # Linear+relu structure\n        self.bias_mean = 0.3\n        # I would recommend between 0.1 and 1.0 or to change and use an Xavier\n        # initializer\n        self.weights_stddev = 0.2\n\n        ########\n        # NOTE: I think that if any of the below parameters are changed,\n        # the best is to readjust every parameter in the \"Training\" section\n        # above to properly compare the architectures only once optimised.\n        ########\n\n        # LSTM structure\n        # Feature count is 9: three 3D sensors' features over time\n        self.n_inputs = len(X_train[0][0])\n        self.n_hidden = 28  # nb of neurons inside the neural network\n        # Use bidir in every LSTM cell, or not:\n        self.use_bidirectionnal_cells = False\n\n        # High-level deep architecture\n        self.also_add_dropout_between_stacked_cells = False  # True\n        # NOTE: values of exactly 1 (int) for those 2 high-level parameters below totally disable them and result in only 1 starting LSTM.\n        self.n_layers_in_highway = 3  # Number of residual connections to the LSTMs (highway-style), this is done for each stacked block (inside them).\n        self.n_stacked_layers = 3  # Stack multiple blocks of residual\n        # layers.\n\n\n#--------------------------------------------\n# Dataset-specific constants and functions + loading\n#--------------------------------------------\n\n# Useful Constants\n\n# Those are separate normalised input features for the neural network\n\nX_train_signals_paths = \"/home/xujinchang/caffe-blur-pose/valid_fc7_feature_new.fea\"\nX_test_signals_paths = \"/home/xujinchang/caffe-blur-pose/test_fc7_feature_new.fea\"\ny_train_path = \"/home/xujinchang/caffe-blur-pose/valid_y_label_2.fea\"\ny_test_path = \"/home/xujinchang/caffe-blur-pose/valid_y_label_2.fea\"\n#X_train = load_X_my(X_train_signals_paths)\nX_valid_path =\"/localSSD/xjc/codalab_train/valid/valid_fc7_feature_new.fea\"\n#X_valid_path = \"/localSSD/xjc/codalab_train/test/final_fc7_feature_new.fea\"\n#X_test = load_X_my(X_test_signals_paths)\nX_valid_result = load_X_pca(X_valid_path)\n
X_valid_result = do_pca(X_valid_result)\n\n\nn_layers_in_highway = 0\nn_stacked_layers = 2\ntrial_name = \"{}x{}\".format(n_layers_in_highway, n_stacked_layers)\nclass EditedConfig(Config):\n    def __init__(self, X, Y):\n        super(EditedConfig, self).__init__(X, Y)\n        self.n_layers_in_highway = n_layers_in_highway\n        self.n_stacked_layers = n_stacked_layers\n\npred_out = test_with_config(EditedConfig, X_valid_result)\nprint type(pred_out)\nfx = open('test_result','w')\nprint >>fx, (pred_out)\nfx.close()\nfy = open('test_label_result','w')\nfor item in xrange(0,len(pred_out)):\n    print np.argmax(pred_out[item])\n    fy.write(str(np.argmax(pred_out[item]))+'\\n')\n\nfy.close()\n\n" } ]
13
rebeccapatterson/ToolBox-WordFrequency
https://github.com/rebeccapatterson/ToolBox-WordFrequency
430101feb448e67159465f1a8b43492ac5fc7569
df25a2b972678d1a1f7fdbac85e1fdf705067100
263fbf31d80845878b5fa2f40df75d038e5954c6
refs/heads/master
2020-12-24T12:33:49.486354
2016-03-14T01:07:14
2016-03-14T01:07:14
52,304,429
0
0
null
2016-02-22T20:46:49
2016-02-17T15:52:02
2016-02-22T01:17:11
null
[ { "alpha_fraction": 0.656541109085083, "alphanum_fraction": 0.6634264588356018, "avg_line_length": 36.42424392700195, "blob_id": "0149b492212ff31beddc5e101cd894af563d93c3", "content_id": "369637fd266440576d25f9f02814551bf5b145ec", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2469, "license_type": "no_license", "max_line_length": 92, "num_lines": 66, "path": "/frequency.py", "repo_name": "rebeccapatterson/ToolBox-WordFrequency", "src_encoding": "UTF-8", "text": "\"\"\" Analyzes the word frequencies in a book downloaded from\n Project Gutenberg \n\n @AUTHOR REBECCA PATTERSON 03-10-16\"\"\"\n#import packages that are used\nimport pickle\nimport string\n\ndef get_word_list(file_name):\n \"\"\" Reads the specified project Gutenberg book. Header comments,\n punctuation, and whitespace are stripped away. The function\n returns a list of the words used in the book as a list.\n All words are converted to lower case.\n \"\"\"\n #first load the data file with the text\n input_file= open(file_name, 'r')\n #strips away the header comment\n lines= input_file.readlines()\n current_line=0\n while lines[current_line].find('START OF THIS PROJECT GUTENBERG EBOOK') == -1:\n current_line+=1\n lines= lines[current_line+1:]\n #converts all letters in text to lower case\n lower_text=str()\n for line in lines:\n lower_text+= \" \" +line.lower()\n #create list of words in string (to eliminate white space) with puncuation stripped away\n clean_text= [x.strip(string.punctuation) for x in lower_text.split()]\n return clean_text\n\n\ndef get_top_n_words(word_list, n):\n \"\"\" Takes a list of words as input and returns a list of the n most frequently\n occurring words ordered from most to least frequently occurring.\n\n word_list: a list of words (assumed to all be in lower case with no\n punctuation\n n: the number of words to return\n returns: a list of n most frequently occurring words ordered from most\n frequently to least frequently occurring\n \"\"\"\n #make empty dictionary, fill with key=word, value=word count from input word list\n d= dict()\n for word in word_list:\n d[word]= d.get(word, 0)+1\n #creates list of tuples with count before the word for elements in dictionary\n word_count= []\n for word in d:\n word_count.append((d[word], word))\n #sort list in order of decreasing word count value\n word_count.sort(reverse=True)\n #rewrite with just the word\n words= []\n for count, word in word_count:\n words.append(word)\n #create list of top n occuring words\n top= words[0:n-1] \n return top\n\nif __name__=='__main__':\n \"\"\"when the code is ran, the chosen book pickle file is converted into a list\n that is then analyezed for word count\"\"\"\n n=100\n word_list= get_word_list('Dracula_full_text.txt')\n top_words= get_top_n_words(word_list, n)\n print top_words" }, { "alpha_fraction": 0.7731092572212219, "alphanum_fraction": 0.7882353067398071, "avg_line_length": 65.11111450195312, "blob_id": "967b444070c0b1372c94e5597c96574bba3636eb", "content_id": "ea9f00e330ec1be8947f2e8c13b334d20b8cc837", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 595, "license_type": "no_license", "max_line_length": 316, "num_lines": 9, "path": "/README.md", "repo_name": "rebeccapatterson/ToolBox-WordFrequency", "src_encoding": "UTF-8", "text": "# ToolBox-WordFrequency\nWord Frequency Analysis Project Toolbox starter code\n\nFull instructions at 
https://sites.google.com/site/sd16spring/home/project-toolbox/word-frequency-analysis\n\nThis program currently returns the top 100 words in the book Dracula. To change the number of words, open the code and change the value of n in line 63. To choose a different book, download the .txt file, place it in the file that the code is in, and in the code replace the file specified in word_list in line 64.\n\nTo run the program and see the top word list in the terminal, type \n\t$ python frequency.py\n" } ]
2
naixinzhang/Freddie
https://github.com/naixinzhang/Freddie
284e565a3c4cbc91d0d641758f7ec4192977207b
624fd263c42a431def8b10f524aa130d25da364b
2f06203df8df6ab90702f81280e27f25c0a8b907
refs/heads/master
2020-06-29T00:48:54.894305
2020-03-06T19:37:34
2020-03-06T19:37:34
200,389,835
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5263681411743164, "alphanum_fraction": 0.5293532609939575, "avg_line_length": 27.742856979370117, "blob_id": "aa9f256c9ee71050fa0e3dc27f366eef8c3638df", "content_id": "45957ac0cfc90fbe85f6f78fa61599dd0e053904", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1005, "license_type": "no_license", "max_line_length": 107, "num_lines": 35, "path": "/Scripts/hospitalsNY.py", "repo_name": "naixinzhang/Freddie", "src_encoding": "UTF-8", "text": "################################# NY HOSPITALS #################################\n#\n# Title: Freddie\n# Files: cleaning.py,hospitalsNY.py,input.py, main.py,path.py,regression.py,statistics.py,test_2.py,test.py\n\n#\n# Author:Naixin Zhang\n# Email:[email protected]\n#\n############################### OUTSIDE HELP CREDITS ###########################\n#\n# Persons: Cornelia Ilin\n# Online sources: available in README file\n#\n############################### 80 COLUMNS WIDE ################################\n\n'''\n# The hospitalsNY.py script holds mainly for user input and response formation for greetings\n'''\n\ndef welcome_prompt():\n '''\n # prints the welcome prompt.\n # @param: none.\n '''\n print(\"Hi, I'm Freddie, what is your name?\")\n \ndef greetings(user_name):\n '''\n # Freddie gets the user name and asks what he can do.\n # @ param: user_name.\n '''\n print(\"Nice to meet you %s. How can I help you today?\" % user_name)\n input_instructions = input(\">: \")\n print(\"Ok, I can help you with this.\")" }, { "alpha_fraction": 0.5812357068061829, "alphanum_fraction": 0.5831971168518066, "avg_line_length": 28.708738327026367, "blob_id": "3f4c6fd8316634490873c3f64d6e5a101c2fc390", "content_id": "bb01740e190a2c1bcdb5929c8e7635ef75012de6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3059, "license_type": "no_license", "max_line_length": 107, "num_lines": 103, "path": "/Scripts/path.py", "repo_name": "naixinzhang/Freddie", "src_encoding": "UTF-8", "text": "################################# NY HOSPITALS #################################\n#\n# Title: Freddie\n# Files: cleaning.py,hospitalsNY.py,input.py, main.py,path.py,regression.py,statistics.py,test_2.py,test.py\n\n#\n# Author:Naixin Zhang\n# Email:[email protected]\n#\n############################### OUTSIDE HELP CREDITS ###########################\n#\n# Persons: Cornelia Ilin\n# Online sources: available in README file\n#\n############################### 80 COLUMNS WIDE ################################\n\nimport os\nimport platform\n\n'''\n# The path.py script holds mainly for setting the input and output path \n# according to the user's computer system\n'''\n\ndef operating_system(OS):\n '''\n # this function sets the drive letter depending on the specified OS\n # @param: OS\n '''\n\n if OS == platform.system():\n global drive_letter \n drive_letter = os.path.splitdrive(os.getcwd())[0]\n drive_letter = os.path.join(drive_letter, os.sep)\n print(\"Nice. 
Your drive letter is now set to %s\" % drive_letter)\n return True\n else:\n print(\"Wrong operating system\")\n return False\n \ndef input_path(path):\n '''\n # this function sets the input path\n # @param: path\n ''' \n path_list = path.split(\",\")\n input_path_curr = \"\"\n for i in range(len(path_list)):\n input_path_curr = os.path.join(input_path_curr,path_list[i])\n return input_path_curr\n\ndef output_path(path):\n '''\n # this function sets the output path\n # @param: path\n '''\n path_list = path.split(\",\")\n output_path_curr = \"\"\n for i in range(len(path_list)):\n output_path_curr = os.path.join(output_path_curr,path_list[i])\n return output_path_curr \n \ndef input_path_conversation():\n '''\n # this function takes input path info provided by user \n # sets output path depending on the OS\n # @param: none\n '''\n global input_path_final\n input_path_final = \"\"\n input_path_curr = \"\"\n \n while not os.path.isdir(input_path_final):\n if len(input_path_final) > 0:\n print(\"Wrong path\")\n print(\"What is your input path?\")\n input_path_info = input(\">: \")\n input_path_curr = input_path(input_path_info)\n input_path_final = os.path.join(drive_letter, input_path_curr)\n print('Your input path is now set to',input_path_curr)\n \n return input_path_final\n \ndef output_path_conversation():\n '''\n # this function takes output path info provided by user\n # sets output path depending on the OS\n # @param: none\n '''\n global output_path_final\n output_path_final = \"\"\n output_path_curr = \"\"\n \n while not os.path.isdir(output_path_final):\n if len(output_path_final) > 0:\n print(\"Wrong path\")\n print(\"What is your output path?\")\n output_path_info = input(\">: \")\n output_path_curr = output_path(output_path_info)\n output_path_final = os.path.join(drive_letter, output_path_curr)\n print('Your output path is now set to',output_path_curr)\n \n return output_path_final" }, { "alpha_fraction": 0.6207278966903687, "alphanum_fraction": 0.6267199516296387, "avg_line_length": 29.65986442565918, "blob_id": "9d64137de09fd7ef46d97b132c3ca285db3e3389", "content_id": "a902eded5551ae70eb543db42763e210af236357", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4506, "license_type": "no_license", "max_line_length": 107, "num_lines": 147, "path": "/Scripts/main.py", "repo_name": "naixinzhang/Freddie", "src_encoding": "UTF-8", "text": "################################# NY HOSPITALS #################################\n#\n# Title: Freddie\n# Files: cleaning.py,hospitalsNY.py,input.py, main.py,path.py,regression.py,statistics.py,test_2.py,test.py\n\n#\n# Author:Naixin Zhang\n# Email:[email protected]\n#\n############################### OUTSIDE HELP CREDITS ###########################\n#\n# Persons: Cornelia Ilin\n# Online sources: available in README file\n#\n############################### 80 COLUMNS WIDE ################################\n\n#import modules here\nimport hospitalsNY as hp\nimport path\nimport input as ip\nimport cleaning as cl\nimport statistics as st\nimport regression as rg\n\n\n'''\n#\n# The main.py script holds the user input and response formation for a data analyst \n# chatboot that collects user input and responds appropriately. \n#\n'''\n\ndef os_and_drive_letter():\n '''\n # sets OS and drive letter. The user is prompted to enter the computer's\n # operating system. 
Example: Windows, Mac.\n # @param: none.\n ''' \n state = False\n while state == False:\n print(\"What is your computer's operating system?\")\n input_OS = input(\">: \")\n state = path.operating_system(input_OS)\n\ndef input_path():\n '''\n # sets the input path and saves its value globally\n # The user is prompted to enter the input path.\n # For example, if user path is \"N:\\Classes\\AAE875\\DataAnalytics\\FinalProgram\\Input\"\n # then Classes, AAE875, DataAnalytics, FinalProgram, Input will be entered\n # in the command line.\n # @param: none.\n '''\n global input_path\n input_path = path.input_path_conversation()\n \ndef output_path():\n '''\n # sets the output path and saves its value globally.\n # The user is prompted to enter the output path.\n # For example, if user path is \"N:\\Classes\\AAE875\\DataAnalytics\\FinalProgram\\Output\"\n # then Classes, AAE875, DataAnalytics, FinalProgram, Output will be entered\n # in the command line.\n # @param: none.\n '''\n global output_path\n output_path = path.output_path_conversation()\n \ndef input_data():\n '''\n # inputs the raw data. The user is prompted to enter the name of the data\n # files (in csv format). For example: SPARCS2014.csv, SPARCS2015.csv,\n # SCARCS2016.csv\n # inputs the data structure. The user is prompted to enter the desired data\n # structure. Available options are: list(csv), array (numpy), dataframe(pandas)\n # @param: none\n '''\n state = False\n while state == False:\n print(\"What are the names of your data files?\")\n input_names = input(\">: \")\n print(\"What is the data structure you would like to work with?\")\n global data_structure\n data_structure = input(\">: \")\n state, data = ip.read_data(input_names, data_structure, input_path)\n return data\n\ndef data_cleaning(data_raw, output_path):\n '''\n # this function will get the current raw data's column number and row number\n # then remove mising values outlier values then make the column name consistent\n # @param: data_raw is the original data including three years data \n #@and the output_path is the output address where we can outpu the data being cleaned\n '''\n cl.row_col_number(data_raw,data_structure)\n data_no_nan = cl.remove_missing_value(data_raw,data_structure)\n data_no_nan_outlier = cl.remove_outliers(data_no_nan)\n cl.data_align(data_no_nan_outlier, output_path)\ndef summary_stats():\n '''\n # this function includes using the final data to draw plots.I use five different formates to plot\n '''\n \n data_clean = st.import_clean_data(output_path)\n st.plot_asthma(data_clean)\n st.plot_pay_source()\n global data_concat\n data_concat = st.print_stay_length_by_disease(data_clean)\n st.plot_cost_vs_year()\n st.plot_charge_cost()\ndef linear_model():\n '''\n # this function doing the regression\n '''\n rg.reg_charge_cost(data_concat)\n \n\n \n######## runs main script ########\n##################################\n\ndef main():\n # print welcome prompts\n hp.welcome_prompt()\n\n # print greetings; get user name\n user_input = input(\">: \")\n hp.greetings(user_input)\n \n # set OS and drive letter\n os_and_drive_letter()\n\n # set input path\n input_path()\n \n # set output path\n output_path()\n \n # input the data\n data_raw, data_structure = input_data()\n \n data_cleaning(data_raw,data_structure,output_path)\n \n summary_stats()\n linear_model()\nif __name__ == \"__main__\":\n main()" }, { "alpha_fraction": 0.7422303557395935, "alphanum_fraction": 0.7638025879859924, "avg_line_length": 41.07692337036133, "blob_id": 
"57a6cfddf9b09ae2c4317ccc14f9eae89c32eb71", "content_id": "0065ece663bff6a9017d4ed8ced84a9ae0be2527", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2741, "license_type": "no_license", "max_line_length": 504, "num_lines": 65, "path": "/README.md", "repo_name": "naixinzhang/Freddie", "src_encoding": "UTF-8", "text": "# Freddie\n\nThis is a computer program that simulates a Data Analyst chatbot, named Freddie, capable of processing user input and returning desired output following the rules and directions in the script. The chatbot can help identify your computer’s operating system (OS), set the input and output paths, read input data stored into the memory of your computer, provide descriptive statistics for key variables in the analysis, and finally, run a linear regression model of your choice. Check a sample output here.\n\n################################# NY HOSPITALS #################################\n\n**Files**: cleaning.py,hospitalsNY.py,input.py, main.py,path.py,regression.py,statistics.py,test_2.py,test.py\n\n**Author**: Naixin Zhang\n\n**Email**: [email protected]\n\n############################### OUTSIDE HELP CREDITS ###########################\n\n**Online sources:** \n\n1.Importing csv from a subdirectory in Python\nhttps://stackoverflow.com/questions/10235752/importing-csv-from-a-subdirectory-in-python\n\n2.How to open my files in `data_folder` with pandas using relative path?\nhttps://stackoverflow.com/questions/35384358/how-to-open-my-files-in-data-folder-with-pandas-using-relative-path\n\n3.how to join path\nhttps://stackoverflow.com/questions/17438027/os-path-join-and-os-path-normpath-both-add-double-backwards-slash-on-windows\nos.path.join() and os.path.normpath() both add double backwards slash on windows \n\n4.get the drive letter \nhttps://docs.python.org/2/library/os.path.html\n\n5.get the users system\nhttps://docs.python.org/2/library/platform.html\n\n6.how to drop na using panda \nhttps://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.dropna.html\n\n7.remove missing values\nhttps://towardsdatascience.com/data-cleaning-with-python-and-pandas-detecting-missing-values-3e9c6ebcf78b\n\n8.Finding outliers in dataset using python\nhttps://medium.com/datadriveninvestor/finding-outliers-in-dataset-using-python-efc3fce6ce32\n\n9.using pretty table for drawing\nhttp://zetcode.com/python/prettytable/\n\n10.pandas.DataFrame.align\nhttps://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.align.html\n\n11.pandas.concat\n\nhttps://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.concat.html\n\n12.compare two data series using this Matplotlib code:\nhttps://pythonspot.com/matplotlib-bar-chart/\n\n13.using tabulate\nhttps://pypi.org/project/tabulate/\n\n14.draw for scatter\nhttps://pythonspot.com/matplotlib-scatterplot/\n\n15.for data splitting into training and testing dataset——sklearn.model_selection.train_test_split\nhttps://scikit-learn.org/stable/modules/generated/sklearn.model_selection.train_test_split.html\n\n16.Linear Regression Example\nhttps://scikit-learn.org/stable/auto_examples/linear_model/plot_ols.html\n" }, { "alpha_fraction": 0.5878438353538513, "alphanum_fraction": 0.589618444442749, "avg_line_length": 36.58333206176758, "blob_id": "79580f1a0065b97b8ee162efdece0282893c501c", "content_id": "927ad59d6821afa8ccb861993a8074c7781df624", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2254, 
"license_type": "no_license", "max_line_length": 116, "num_lines": 60, "path": "/Scripts/input.py", "repo_name": "naixinzhang/Freddie", "src_encoding": "UTF-8", "text": "################################# NY HOSPITALS #################################\n#\n# Title: Freddie\n# Files: cleaning.py,hospitalsNY.py,input.py, main.py,path.py,regression.py,statistics.py,test_2.py,test.py\n\n#\n# Author:Naixin Zhang\n# Email:[email protected]\n#\n############################### OUTSIDE HELP CREDITS ###########################\n#\n# Persons: Cornelia Ilin\n# Online sources: available in README file\n#\n############################### 80 COLUMNS WIDE ################################\nimport csv\nimport numpy as np\nimport pandas as pd\nimport os\n\n'''\n# The input.py script read data from input/RawData using three formates\n'''\n\n\ndef read_data(file_names, data_structure, input_path):\n '''\n # this function reads in data (file_names)\n # the desired data structure is provided by the user\n # the desired data structure can be list(csv), array (numpy), dataframe(pandas)\n # if the user enters a data structure other than the ones listed above, Freddie\n # responds \"I don't know how to read this data structure\"\n # @param: file_names, data_structure, input_path\n '''\n file_names_list = file_names.split(\",\")\n for i in range(len(file_names_list)):\n if not os.path.isfile(os.path.join(input_path,file_names_list[i])):\n print(\"Wrong file names\")\n return False, \"\"\n\n if data_structure != 'list(csv)' and data_structure != 'array(numpy)' and data_structure != 'dataframe(pandas)':\n print(\"I don't know how to read this data structure\")\n return False, \"\"\n \n data_raw = []\n \n if data_structure == 'list(csv)':\n for filename in file_names_list:\n with open(os.path.join(input_path, filename), 'r') as f:\n reader = csv.reader(f, delimiter=\",\")\n data_raw.append(list(reader))\n return True, data_raw\n if data_structure == 'array(numpy)':\n for filename in file_names_list:\n data_raw.append(np.genfromtxt(os.path.join(input_path, filename),delimiter=','))\n if data_structure == 'dataframe(pandas)':\n for filename in file_names_list:\n df = pd.read_csv(os.path.join(input_path, filename), index_col = None, header = 0, low_memory = False)\n data_raw.append(df)\n return True, data_raw" }, { "alpha_fraction": 0.599513590335846, "alphanum_fraction": 0.6084312796592712, "avg_line_length": 52.059139251708984, "blob_id": "7f04959401bbcfe3b277062ef920a1444fc57ec5", "content_id": "55a0248fb781c8e4b393cd4076331a9151f86379", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9868, "license_type": "no_license", "max_line_length": 921, "num_lines": 186, "path": "/Scripts/cleaning.py", "repo_name": "naixinzhang/Freddie", "src_encoding": "UTF-8", "text": "################################# NY HOSPITALS #################################\n#\n# Title: Freddie\n# Files: cleaning.py,hospitalsNY.py,input.py, main.py,path.py,regression.py,statistics.py,test_2.py,test.py\n\n#\n# Author:Naixin Zhang\n# Email:[email protected]\n#\n############################### OUTSIDE HELP CREDITS ###########################\n#\n# Persons: Cornelia Ilin\n# Online sources: available in README file\n#\n############################### 80 COLUMNS WIDE ################################\n\nimport pandas as pd\nfrom prettytable import PrettyTable\nimport os\n\n\n'''\n# The cleaning.py script holds mainly for data summarize and data cleaning\n'''\n\ndef 
row_col_number(data_raw,data_structure):\n    '''\n    # this function aims to get the raw data's shape\n    # @param: data_raw is the original data including three years of data\n    '''\n    print(\"Ok, good choice. Tell me what you would like to do next?\")\n    input(\">: \")\n    rows = 0\n    columns = 0\n    if data_structure == 'dataframe(pandas)':\n        for i in range(len(data_raw)):\n            rows = rows + len(data_raw[i].index)\n            columns = columns + len(data_raw[i].columns)\n    elif data_structure == 'list(csv)':\n        rows = len(data_raw)\n        columns = len(data_raw[0])\n    print(\"Let me check...oh...this data is really big!\")\n    print(\"You have %d inpatient discharges and %d variables that document these observations\" % (rows, columns))\n\n
def remove_missing_value(data_raw,data_structure):\n    \n    '''\n    # this function aims to deal with missing values\n    # @param: data_raw is the original data including three years of data\n    '''\n\n    \n    input(\">: \")\n    print(\"No problem, I can help you with this. Let's clean the data first.\")\n    print(\"Would you like to drop observations with missing values?\")\n    input(\">: \")\n    \n    data_no_nan = []\n    \n    if data_structure == 'dataframe(pandas)':\n        for i in range(len(data_raw)):  \n            if i == 0:\n                data_no_nan.append(data_raw[i].dropna(subset = ['Health Service Area', 'Hospital County', 'Operating Certificate Number', 'Facility ID', 'Facility Name', 'Age Group', 'Zip Code - 3 digits', 'Gender', 'Race', 'Ethnicity', 'Length of Stay', 'Type of Admission', 'Patient Disposition', 'Discharge Year', 'CCS Diagnosis Code', 'CCS Diagnosis Description', 'CCS Procedure Code', 'CCS Procedure Description', 'APR DRG Code', 'APR DRG Description', 'APR MDC Code', 'APR MDC Description', 'APR Severity of Illness Code', 'APR Severity of Illness Description', 'APR Risk of Mortality', 'APR Medical Surgical Description', 'Payment Typology 1', 'Payment Typology 2', 'Payment Typology 3', 'Attending Provider License Number', 'Operating Provider License Number', 'Birth Weight', 'Abortion Edit Indicator', 'Emergency Department Indicator', 'Total Charges', 'Total Costs']));\n            if i == 1:\n                data_no_nan.append(data_raw[i].dropna(subset = ['Health Service Area', 'Hospital County', 'Operating Certificate Number', 'Facility Id', 'Facility Name', 'Age Group', 'Zip Code - 3 digits', 'Gender', 'Race', 'Ethnicity', 'Length of Stay', 'Type of Admission', 'Patient Disposition', 'Discharge Year', 'CCS Diagnosis Code', 'CCS Diagnosis Description', 'CCS Procedure Code', 'CCS Procedure Description', 'APR DRG Code', 'APR DRG Description', 'APR MDC Code', 'APR MDC Description', 'APR Severity of Illness Code', 'APR Severity of Illness Description', 'APR Risk of Mortality', 'APR Medical Surgical Description', 'Payment Typology 1', 'Payment Typology 2', 'Payment Typology 3', 'Attending Provider License Number', 'Operating Provider License Number', 'Birth Weight', 'Abortion Edit Indicator', 'Emergency Department Indicator', 'Total Charges', 'Total Costs']));\n            if i == 2:\n                data_no_nan.append(data_raw[i].dropna(subset = ['Health Service Area', 'Hospital County', 'Operating Certificate Number', 'Facility Id', 'Facility Name', 'Age Group', 'Zip Code - 3 digits', 'Gender', 'Race', 'Ethnicity', 'Length of Stay', 'Type of Admission', 'Patient Disposition', 'Discharge Year', 'CCS Diagnosis Code', 'CCS Diagnosis Description', 'CCS Procedure Code', 'CCS Procedure Description', 'APR DRG Code', 'APR DRG Description', 'APR MDC Code', 'APR MDC Description', 'APR Severity of Illness Code', 'APR Severity of Illness Description', 'APR Risk of Mortality', 'APR Medical Surgical Description', 'Payment Typology 1', 'Payment Typology 2', 'Payment Typology 3', 'Attending Provider License Number', 'Operating Provider License Number', 'Birth Weight', 'Abortion Edit Indicator', 'Emergency Department Indicator', 'Total Charges', 'Total Costs', 'Ratio of Total Costs to Total Charges']));\n    \n        rows = 0\n        columns = 0\n        for i in range(len(data_no_nan)):\n            rows = rows + len(data_no_nan[i].index)\n            columns = columns + len(data_no_nan[i].columns)\n    \n    elif data_structure == 'list(csv)':\n        row_index_tbr = [] #tbr stands for to be removed\n        for row_index, row in enumerate(data_raw):\n            if row_index == 0: #skip row with column names\n                continue\n            for cell_index, cell in enumerate(data_raw):\n                if cell == \"\":\n                    print(row)\n                    row_index_tbr.append(data_raw)\n                    break  \n        for row_index, row in enumerate(data_raw):\n            if row_index in row_index_tbr: #skip rows to be removed (tbr)\n                continue\n            data_no_nan.append(row)\n        rows = len(data_no_nan)\n        columns = len(data_no_nan[0])\n    \n    print(\"I have removed all the missing values in your data.\")\n    print(\"You now have %d inpatient discharges and %d variables that document these observations.\" % (rows, columns))\n    return data_no_nan\n\n
def remove_outliers(data_no_nan,data_structure):\n    '''\n    # this function aims to deal with outlier values\n    # @param: data_no_nan is the data with missing values already removed\n    '''\n    input(\">: \")\n    print(\"Right...would you like to remove data outliers?\")\n    input(\">: \")\n    if data_structure == 'dataframe(pandas)':\n        col_names = [\"Length of Stay\",\"Total Charges\",\"Total Costs\"]\n        pd.options.mode.chained_assignment = None\n        for i in range(len(data_no_nan)):\n            data_no_nan[i].loc[:,('Length of Stay')] = data_no_nan[i].loc[:,('Length of Stay')].apply(lambda x: pd.to_numeric(x, errors = 'coerce'))\n            data_no_nan[i] = data_no_nan[i].dropna(subset = ['Length of Stay'])\n            for j in range(len(col_names)):\n                q1 = data_no_nan[i][col_names[j]].quantile(0.25)\n                q3 = data_no_nan[i][col_names[j]].quantile(0.75)\n                iqr = q3-q1 #Interquartile range\n                fence_low = q1-1.5*iqr\n                fence_high = q3+1.5*iqr\n                data_no_nan[i] = data_no_nan[i].loc[(data_no_nan[i][col_names[j]] > fence_low) & (data_no_nan[i][col_names[j]] < fence_high)]\n        \n        rows = 0\n        columns = 0\n        for i in range(len(data_no_nan)):\n            rows = rows + len(data_no_nan[i].index)\n            columns = columns + len(data_no_nan[i].columns)  \n    \n    elif data_structure == 'list(csv)':\n        index_len_of_stay = data_no_nan[0].index('Length of Stay')\n        index_total_charges = data_no_nan[0].index('Total Charges')\n        index_total_costs = data_no_nan[0].index('Total Costs')\n        my_list = [index_len_of_stay,index_total_charges, index_total_costs]  \n        for i in my_list:\n            for row in data_no_nan:\n                new_temp = sorted(row[i])\n                q1 = int(new_temp[int(len(new_temp) * 0.25) - 1])\n                q3 = int(new_temp[int(len(new_temp) * 0.75) - 1])\n                iqr = q3-q1 #Interquartile range\n                fence_low = q1-1.5*iqr\n                fence_high = q3+1.5*iqr\n                if row[i] < fence_low or row[i] > fence_high:\n                    continue\n                data_no_nan.append(row)  \n    \n    print(\"I have removed all outliers in your data.\")\n    print(\"You now have %d inpatient discharges and %d variables that document these observations.\" % (rows, columns))\n    \n    return data_no_nan\n\n
def data_align(data_no_nan_outlier, output_path):\n    input(\">: \")\n    print(\"I am afraid this is not the best way to move forward. The variable names in your data are not consistent over time.\")\n    print(\"Let me put these in a table for you. 
Please see below:\")\n \n column_names = []\n for i in range(len(data_no_nan_outlier)):\n column_names.append(list(data_no_nan_outlier[i].columns))\n \n t = PrettyTable(['Year', 'Variable Names'])\n t.align['Variable Names'] = \"l\"\n for i in range(len(column_names)):\n t.add_row([2014+i, column_names[i]])\n print(t)\n \n input(\">: \")\n \n print(\"Sure thing! I will use a dictionary for this. Processing...\")\n #add columns\n data_no_nan_outlier[0][\"new\"] = data_no_nan_outlier[0][\"Total Costs\"]/data_no_nan_outlier[0][\"Total Charges\"]\n data_no_nan_outlier[1][\"new\"] = data_no_nan_outlier[1][\"Total Costs\"]/data_no_nan_outlier[1][\"Total Charges\"]\n for i in range(len(data_no_nan_outlier) - 1):\n data_no_nan_outlier[i].columns = data_no_nan_outlier[-1].columns\n \n print(\"Variable names match those in year 2016 now. Please see below:\")\n column_names = []\n for i in range(len(data_no_nan_outlier)):\n column_names.append(list(data_no_nan_outlier[i].columns))\n \n t = PrettyTable(['Year', 'Variable Names'])\n t.align['Variable Names'] = \"l\"\n for i in range(len(column_names)):\n t.add_row([2014+i, column_names[i]])\n print(t)\n \n input(\">: \")\n print(\"Yes, unless you want to do more data cleaning\")\n input(\">: \")\n \n for i in range(len(data_no_nan_outlier)):\n data_no_nan_outlier[i].to_csv(os.path.join(output_path, 'SPARCS' + str(2014+i) + '_clean' + '.csv'))" }, { "alpha_fraction": 0.6489577293395996, "alphanum_fraction": 0.6605792045593262, "avg_line_length": 44.563026428222656, "blob_id": "d9690b54f6995e2319a495e523edf18231757e57", "content_id": "67d093ce8d12f49df1bd626c3c363aaefeb10986", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5421, "license_type": "no_license", "max_line_length": 215, "num_lines": 119, "path": "/Scripts/statistics.py", "repo_name": "naixinzhang/Freddie", "src_encoding": "UTF-8", "text": "################################# NY HOSPITALS #################################\n#\n# Title: Freddie\n# Files: cleaning.py,hospitalsNY.py,input.py, main.py,path.py,regression.py,statistics.py,test_2.py,test.py\n\n#\n# Author:Naixin Zhang\n# Email:[email protected]\n#\n############################### OUTSIDE HELP CREDITS ###########################\n#\n# Persons: Cornelia Ilin\n# Online sources: available in README file\n#\n############################### 80 COLUMNS WIDE ################################\nimport pandas as pd\nimport os\nimport matplotlib.pyplot as plt\nfrom tabulate import tabulate\n\n'''\n# The statistic.py script holds mainly for data visualization\n'''\n\ndef import_clean_data(output_path):\n '''\n # this function aims to get the final data which is cleaned\n #@parameter: the output_path is the output address where we can output the data being cleaned\n '''\n data_clean = []\n for i in range(3):\n df = pd.read_csv(os.path.join(output_path, 'SPARCS' + str(2014+i) + '_clean' + '.csv'), index_col = None, header = 0, low_memory = False)\n data_clean.append(df)\n return data_clean\n\ndef plot_asthma(data_clean):\n '''\n # this function aims to draw a bar chart to get the Frequency count of patients with asthma conditions by year and type of admission'\n #@parameter: the cleaning data\n '''\n print(\"Sure thing! I can plot some graphs for you. What do you want to know more precisely?\")\n input(\">: \")\n print(\"This is a very interesting question. 
Let me prepare some graphs for you...\")\n print(\"Here is what I found:\")\n \n data_asthma = []\n for i in range(len(data_clean)):\n data_asthma.append(data_clean[i].loc[data_clean[i]['CCS Diagnosis Description'] == 'Asthma'])\n \n global asthma_concat\n asthma_concat = pd.concat(data_asthma, axis=0, ignore_index=True)\n \n asthma_count = asthma_concat.groupby(['Type of Admission','Discharge Year']).size()\n\n count_plot = asthma_count.Emergency.plot(kind = 'bar', color = 'red', title = 'Frequency count of patients with asthma conditions \\n by year and type of admission', label = 'Emergency')\n count_plot = asthma_count.Urgent.plot(kind = 'bar', bottom = asthma_count.Emergency,color = 'blue', label = 'Urgent')\n count_plot.set_xlabel('Discharge Year')\n count_plot.set_ylabel('Frequency Count')\n plt.xticks(rotation = 0)\n plt.legend()\n plt.show()\n\ndef plot_pay_source():\n '''\n # this function aims to draw a pie to investigate the proportion of different type of payment sources for patients'\n #@parameter: the cleaning data\n '''\n input('>: ')\n print(\"Indeed, very interesting. Would you like me to plot some more graphs?\")\n input(\">: \")\n print(\"It seems that there are 10 types of payment sources. I think a pie chart is more appropriate here. Do you agree?\")\n input(\">: \")\n print(\"Interesting results again. Please see below what I found in the data:\")\n \n plt.figure(figsize=(6,7.5))\n pay_source_plot = asthma_concat.groupby(['Payment Typology 1']).size().plot(kind = 'pie', title = 'Type of payment sources for patients with asthma conditions', shadow = True, labeldistance = None, radius = 1.1)\n plt.legend(bbox_to_anchor=(0, 0), loc = \"upper left\", ncol=2)\n plt.show()\n \n#below here is my findings\ndef print_stay_length_by_disease(data_clean):\n '''\n # this function draw a table to investigate the average length of stay under different severities of illness. \n #The data has been sorted by length of stay from shortest to longest:\"\n '''\n input(\">: \")\n print(\"In the following is the average length of stay under different severities of illness. \\nThe data has been sorted by legnth of stay from shortest to longest:\")\n global data_concat\n data_concat = pd.concat(data_clean, axis=0, ignore_index=True) \n stay_disease = data_concat.groupby('APR Severity of Illness Description', as_index=False)['Length of Stay'].mean().sort_values('Length of Stay')\n print(tabulate(stay_disease, headers='keys', tablefmt='psql'))\n print(\"As the illness is more severe, the length of stay is longer.\")\n return data_concat\n\ndef plot_cost_vs_year():\n '''\n # this function aims to plot a line chart to investigate changes of average total costs in different years. \n '''\n input(\">: \")\n print(\"In the following is the changes of average total costs in different years:\")\n cost_year = data_concat.groupby('Discharge Year', as_index=False)['Total Costs'].mean().sort_values('Discharge Year')\n cost_year_plot = cost_year.plot(x = 'Discharge Year', y = 'Total Costs')\n axes = plt.gca()\n axes.set_xlim([2013.5,2016.5])\n axes.set_ylim([9600,11800])\n plt.xticks([2014, 2015, 2016])\n plt.show()\n print(\"The average total cost increases year by year\")\n \ndef plot_charge_cost():\n '''\n # this function aims to plot a scatter to investigate relationship between total charges and total cost. \n '''\n input(\">: \")\n print(\"I randomly selected 1000 rows of data as the sample. 
In the following is the scater plot between total charges and total cost\")\n data_sample = data_concat.sample(n = 1000, random_state=1)\n charge_cost = data_sample.plot.scatter(x='Total Charges', y='Total Costs', color='DarkBlue')\n plt.show()\n print(\"Total charges and total cost is more or less in linear relationship. When the total charges is larger, the data distribution is more random.\")" }, { "alpha_fraction": 0.613882839679718, "alphanum_fraction": 0.6290672421455383, "avg_line_length": 31.02777862548828, "blob_id": "e8b65f6949836a0ccdc959ef3b9240efca147521", "content_id": "20206a41dad3a741b9fd13898a65931a393acaee", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2305, "license_type": "no_license", "max_line_length": 238, "num_lines": 72, "path": "/Scripts/regression.py", "repo_name": "naixinzhang/Freddie", "src_encoding": "UTF-8", "text": "################################# NY HOSPITALS #################################\n#\n# Title: Freddie\n# Files: cleaning.py,hospitalsNY.py,input.py, main.py,path.py,regression.py,statistics.py,test_2.py,test.py\n\n#\n# Author:Naixin Zhang\n# Email:[email protected]\n#\n############################### OUTSIDE HELP CREDITS ###########################\n#\n# Persons: Cornelia Ilin\n# Online sources: available in README file\n#\n############################### 80 COLUMNS WIDE ################################\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom sklearn import datasets, linear_model\nimport pandas as pd\n\n'''\n# The regression.py script holds mainly for doing linear regression\n'''\n\n\ndef reg_charge_cost(data_concat):\n '''\n # this function do the regression to investigate the relationship between the total charges and total costs\n # sets output path depending on the OS\n # @param: none\n '''\n \n input(\">: \")\n print(\"Sure, let's explore the relationship between the total charges and total costs. \\nWe randomly selected 2000 data points as a sample. 2500 of the sample data points are selected as training data sets and the rest are test sets\")\n #used for initializing the internal random number generator, which will decide the splitting of data into train and test indices\n data_sample = data_concat.sample(n = 3000, random_state=1)\n \n X = data_sample['Total Charges']\n Y = data_sample['Total Costs']\n\n X=X.values.reshape(len(X.index),1)\n Y=Y.values.reshape(len(Y.index),1)\n \n # Split the data into training/testing sets\n X_train = X[:-500]\n X_test = X[-500:]\n\n # Split the targets into training/testing sets\n Y_train = Y[:-500]\n Y_test = Y[-500:]\n\n# Plot outputs\n plt.scatter(X_test, Y_test, color='black', alpha=0.3)\n plt.title('Test Data')\n plt.xlabel('Total Charges')\n plt.ylabel('Total Costs')\n\n plt.show()\n \n # Create linear regression object\n regr = linear_model.LinearRegression()\n\n # Train the model using the training sets\n regr.fit(X_train, Y_train)\n\n # Plot outputs\n plt.plot(X_test, regr.predict(X_test), color='red',linewidth=3)\n plt.scatter(X_test, Y_test, color='black', alpha=0.3)\n plt.title('Linear Regression')\n plt.xlabel('Total Charges')\n plt.ylabel('Total Costs')\n plt.show()" } ]
8
Aaradhyaa717/BeautifulSoup
https://github.com/Aaradhyaa717/BeautifulSoup
1389c27d8d4879c6a0fd169dc96e66c5133555b6
2938e332fad02d051375cd451997752156fe2c82
00d1ef0983ac559534639db9cd0afeefe5bd6ff2
refs/heads/main
2023-04-11T11:51:15.386387
2021-04-19T05:52:47
2021-04-19T05:52:47
359,344,230
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7966101765632629, "alphanum_fraction": 0.7966101765632629, "avg_line_length": 58, "blob_id": "42ba8eec37059d6157dbdbf98d9090ca1bd69d5b", "content_id": "4ce79f2b11df8a88ba03e12000a28465263022c5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 118, "license_type": "no_license", "max_line_length": 101, "num_lines": 2, "path": "/README.md", "repo_name": "Aaradhyaa717/BeautifulSoup", "src_encoding": "UTF-8", "text": "# BeautifulSoup\nA table is scrapped using beautiful soup with the details of year, host and winner of Fifa World Cup.\n" }, { "alpha_fraction": 0.6199821829795837, "alphanum_fraction": 0.6556645631790161, "avg_line_length": 20.459999084472656, "blob_id": "f4e3c357bb5c2fccd41d64de0c7b1074957bb134", "content_id": "0b069b28f6044ab60f59e5ae21c70c15a18fcfde", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1121, "license_type": "no_license", "max_line_length": 94, "num_lines": 50, "path": "/fifa.py", "repo_name": "Aaradhyaa717/BeautifulSoup", "src_encoding": "UTF-8", "text": "Python 3.8.3 (tags/v3.8.3:6f8c832, May 13 2020, 22:20:19) [MSC v.1925 32 bit (Intel)] on win32\r\nType \"help\", \"copyright\", \"credits\" or \"license()\" for more information.\r\n>>> import requests\r\nfrom bs4 import BeautifulSoup\r\nimport pandas as pd\r\n\r\npage = requests.get(\"https://www.foxsports.com/soccer/fifa-world-cup/history\")\r\n\r\nsoup = BeautifulSoup(page.content, 'html.parser')\r\n\r\ntable = soup.find('table', {'class':'wisbb_heStandard'})\r\n#print(table)\r\n\r\ncols = []\r\nfor name in table.select('th'):\r\n cols.append(name.text.strip())\r\n \r\n#print(cols)\r\n\r\ntitles = cols[:3]\r\n#print(titles)\r\n\r\ninfo = []\r\nfor match in table.select('td'):\r\n info.append(match.text.strip())\r\n#print(info)\r\n\r\n# Extracting year, host and winner of each match\r\nyear = info[::9]\r\nhost = info[1::9]\r\nchampion = info[2::9]\r\n'''print(year)\r\nprint(host)\r\nprint(champion)\r\n'''\r\n\r\n# Converting list into dictionary\r\nworld_cup_dict = {\r\n 'Year' : year,\r\n 'Host' : host,\r\n 'Winner' : champion\r\n}\r\n\r\n# Converting dict into dataframe\r\ndf= pd.DataFrame.from_dict(world_cup_dict)\r\n\r\n#Starting the row count from 1\r\ndf.index = df.index+1\r\n\r\nprint(df )" } ]
2
amourav/kNN_from_scratch
https://github.com/amourav/kNN_from_scratch
1021d588cce0fa920f5a0da919070aff897857fd
a708dd43cbc86321b7db5d7eaa79f28cc59b99f3
86872cc151a4ede1d05c165d1b4e5322071490d3
refs/heads/master
2020-07-09T20:15:16.492344
2019-09-02T02:38:24
2019-09-02T02:38:24
204,072,776
2
1
null
null
null
null
null
[ { "alpha_fraction": 0.5700173377990723, "alphanum_fraction": 0.5736568570137024, "avg_line_length": 34.17683029174805, "blob_id": "860a842e0bff68baaf61e222d04254db83233f27", "content_id": "ca25d2388dbac9b6d2ac2831719e77afa11867d6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5770, "license_type": "no_license", "max_line_length": 99, "num_lines": 164, "path": "/kNearestNeighbors.py", "repo_name": "amourav/kNN_from_scratch", "src_encoding": "UTF-8", "text": "import numpy as np\nfrom collections import Counter\n\n\ndef euc_dist(a, b):\n \"\"\"\n calculate euclidean distance (l2 norm of a-b) between a and b\n :param a: data point X[i, :] (numpy array) shape = n\n :param b: data point X[j, :] (numpy array) shape = n\n :return: distance (float)\n \"\"\"\n return np.linalg.norm(a - b, ord=2)\n\n\ndef accuracy(y_true, y_pred):\n \"\"\"\n measure accuracy of predictions (y_pred) given true labels (y_true)\n :param y_true: true class labels (numpy array) - e.g. np.array([0, 1, 2, 1])\n :param y_pred: predicted class labels (numpy array) e.g. np.array([0, 2, 1, 1])\n :return: accuracy (float)\n \"\"\"\n return np.sum(y_true == y_pred) / len(y_true)\n\n\ndef norm_data(X):\n \"\"\"\n normalize data to have zero mean and unit variance\n :param X: input data (array) - X.shape = (n_samples, m_features)\n :return:\n \"\"\"\n mean, std = X.mean(axis=0), X.std(axis=0)\n return (X - mean) / std, (mean, std)\n\n\ndef argsort(a):\n \"\"\"\n sort numpy array (ascending)\n :param a: numpy array to be sorted\n :return: sorted numpy array (array)\n \"\"\"\n return np.array(a).argsort()\n\n\nclass kNearestNeighbor():\n \"\"\"\n k nearest neighbour classifier - https://en.wikipedia.org/wiki/K-nearest_neighbors_algorithm\n\n k - nearest neighbours (int) - default = 3\n dist_metric - distance metric (str) - default = euclidean\n norm - normalize data to zero mean\n and unit variance (bool) - default = True\n example:\n knn = kNearestNeighbor(k=3, dist_metric='euclidean', norm=True)\n knn.fit(X_train, y_train)\n y_test_pred = knn.predict(X_test)\n \"\"\"\n\n def __init__(self, k=3, dist_metric='euclidean', norm=True):\n \"\"\"\n :param k: nearest neighbours (int) - default = 3\n :param dist_metric: distance metric (str) - default = euclidean\n :param norm: normalize data to unit mean and variance (bool) - default = True\n \"\"\"\n self.k = k\n self.isFit = False # model fitting done?\n self.norm = norm\n self._set_dist_func(dist_metric)\n\n def fit(self, X_train, y_train, v=False):\n \"\"\"\n Define training data for\n :param X_train: training input data (numpy array) - X.shape = (n_samples, m_features)\n :param y_train: training labels (numpy array) - X.shape = (n_samples)\n :param v: verbose. 
print trn acc if True (bool)\n :return: None\n \"\"\"\n # check data\n if self.norm:\n X_train, (trn_mean, trn_std) = norm_data(X_train)\n self.trn_mean = trn_mean\n self.trn_std = trn_std\n self.X_train = X_train\n self.y_train = y_train\n\n y_train_pred, y_train_pred_proba = [], []\n for i, x_i in enumerate(X_train):\n distances = []\n for j, x_j in enumerate(X_train):\n if i == j:\n dist_ij = 0\n else:\n dist_ij = self.dist_func(x_i, x_j)\n\n distances.append(dist_ij)\n pred_i = self.estimate_point(distances, y_train)\n y_train_pred_i, y_train_pred_proba_i = pred_i\n y_train_pred.append(y_train_pred_i)\n y_train_pred_proba.append(y_train_pred_proba_i)\n\n if v:\n trn_acc = accuracy(y_train, y_train_pred)\n print('training accuracy: {}'.format(trn_acc))\n self.isFit = True\n\n def estimate_point(self, distances, y):\n \"\"\"\n estimate most likely class given k neighbours\n :param distances: distances to all other points (numpy array)\n :param y: labels associated with each entry in distances\n :return: most likely class, probability of class\n \"\"\"\n sort_idx = argsort(distances)\n y_closest = y[sort_idx][:self.k]\n most_common = Counter(y_closest).most_common(1)[0]\n y_pred_i = most_common[0]\n y_pred_proba_i = most_common[1] / len(y_closest)\n return y_pred_i, y_pred_proba_i\n\n def norm_new(self, X_new):\n \"\"\"\n normalize test data based on mean and variance of training data\n :param X_new: input data of a new set of samples (numpy array) -\n X_new.shape = (n_samples, m_features)\n :return: normalized data (numpy array)\n \"\"\"\n return (X_new - self.trn_mean) / self.trn_std\n\n def predict(self, X_new):\n \"\"\"\n predict class labels based on training data\n :param X_new: input data of a new set of samples (numpy array) -\n X_new.shape = (n_samples, m_features)\n :return: y_new_pred: predicted class labels of X_new (numpy array)\n \"\"\"\n if not (self.isFit):\n raise Exception('run knn.fit(x_trn, y_trn) before running knn.predict(x_new)')\n if self.norm:\n X_new = self.norm_new(X_new)\n\n y_new_pred, y_new_pred_proba = [], []\n for i, x_i in enumerate(X_new):\n distances = []\n for j, x_j in enumerate(self.X_train):\n dist_ij = self.dist_func(x_i, x_j)\n distances.append(dist_ij)\n\n pred_i = self.estimate_point(distances, self.y_train)\n y_pred_i, y_pred_proba_i = pred_i\n y_new_pred.append(y_pred_i)\n y_new_pred_proba.append(y_pred_proba_i)\n return y_new_pred\n\n def _set_dist_func(self, dist_metric):\n \"\"\"\n set distance metric\n :param dist_metric: method for measuring distance between points (str)\n default = euclidean\n :return:\n \"\"\"\n implemented_metrics = {'euclidean': euc_dist, }\n try:\n self.dist_func = implemented_metrics[dist_metric]\n except KeyError:\n raise Exception('{} is not an acceptable argument for dist_metric'.format(dist_metric))\n\n" }, { "alpha_fraction": 0.7133212685585022, "alphanum_fraction": 0.7263296842575073, "avg_line_length": 48.373985290527344, "blob_id": "a7956ec3967cbc75e98d1511e0850c3c12bb4bdc", "content_id": "9184f32d8612bf0b615302ac8761648f1b66f5c3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 6074, "license_type": "no_license", "max_line_length": 612, "num_lines": 123, "path": "/README.md", "repo_name": "amourav/kNN_from_scratch", "src_encoding": "UTF-8", "text": "# K Nearest Neighbor\n\nImplementation of kNN in Python (3.6).\n\n\n## Description\n\nk-nearest neighbors (or \"neighbours\" for us Canadians) is a non-parametric method used in classification. 
The input consists of the k closest training examples in the feature space. The output is a class membership. An object is classified by a plurality vote of its neighbors, with the object being assigned to the class most common among its k nearest neighbors (k is a positive integer, typically small). If k = 1, then the object is simply assigned to the class of that single nearest neighbor.\nIn kNN regression, the output is the property value for the object. This value is the average of the values of k nearest neighbors. kNN is a type of instance-based learning, or lazy learning, where the function is only approximated locally and all computation is deferred until classification.\n\nsource: https://en.wikipedia.org/wiki/K-nearest_neighbors_algorithm\n\n## Dataset\n\nWe will use the iris dataset to demo the kNN classifier (Fig. 1)\n\nThe Iris flower data set or Fisher's Iris data set is a multivariate data set introduced by the British statistician and biologist Ronald Fisher in his 1936 paper The use of multiple measurements in taxonomic problems as an example of linear discriminant analysis. It is sometimes called Anderson's Iris data set because Edgar Anderson collected the data to quantify the morphologic variation of Iris flowers of three related species. Two of the three species were collected in the Gaspé Peninsula \"all from the same pasture, and picked on the same day and measured at the same time by the same person with the same apparatus\".\n\n![Image](https://github.com/amourav/kNN_from_scratch/blob/master/readme_imgs/iris.PNG)\n\nFigure 1: Iris Flower Species [source](https://www.flickr.com/photos/gmayfield10/3352170798/in/photostream/)\n\n\nThis data sets consists of 3 different types of irises (Setosa, Versicolour, and Virginica) petal and sepal length, stored in a 150x4 numpy.ndarray. The rows being the samples and the columns being: Sepal Length, Sepal Width, Petal Length and Petal Width.\n\nsource: https://en.wikipedia.org/wiki/Iris_flower_data_set\n\n\n## Overview\n\nFor illustration purposes we will only be using the two features (Sepal Width, Petal Width). We will also split the dataset into training (120 samples) and testing (30 samples) A scatterplot illustrating the distribution of iris flower species based on these features (Fig. 2).\n\n![Image](https://github.com/amourav/kNN_from_scratch/blob/master/readme_imgs/scatter1.png) <br/>\nFigure 2: Scatterplot of samples in iris dataset. <br/> <br/> <br/>\n\nNow that we split the dataset into training and testing, we can run our kNN model (Fig. 3)\n```\ntrain accuracy: 0.97\ntest accuracy: 0.92\n```\n![Image](https://github.com/amourav/kNN_from_scratch/blob/master/readme_imgs/scatter2.png) <br/>\nFigure 3: Scatterplot of iris dataset labeled by species (sepal length vs sepal width). Background colour represents best guess of the knn classifier for the class label of the hypothetical point in this feature space. <br/> <br/> <br/>\n\nWhile this performance is good, we can further improve the accuracy by tuning the value of `k` on the test set (Figs. 4, 5).\n\n![Image](https://github.com/amourav/kNN_from_scratch/blob/master/readme_imgs/tune_k.png) <br/>\nFigure 4: Accuracy for each value of k evaluated on the training and testing data. 
<br/>\n```\noptimal value for k: 12\ntrain accuracy: 0.97\ntest accuracy: 0.94\n```\n![Image](https://github.com/amourav/kNN_from_scratch/blob/master/readme_imgs/scatter2b.png) <br/>\nFigure 5: Scatterplot of iris dataset with predicted (knn - k=12) and actual class labels (o - train set, \nx - test set). <br/> <br/> <br/>\n\nOf course, we are not limited to using these two features, or any two features (Fig. 6). <br/> <br/>\n![Image](https://github.com/amourav/kNN_from_scratch/blob/master/readme_imgs/knn_plots_full.png) <br/>\nFigure 6: Pairwise comparison of features in the iris dataset and predicted labels (knn - k=12). \n\n## Dependencies\n\nTo run kNearestNeighbor you only need the `numpy` package.\nTo run the demo notebook you will need a few additional packages:\n`matplotlib`\n`sklearn`\n\n\n## Usage\n\n`knn = kNearestNeighbor(k=k)` Initialize knn classifier with the number of neighbours.\n\n`knn.fit(X_trn, y_trn)` Fit the classifier to the training data. (Note: all this does is evaluate the training accuracy and save the training set.\n\n`y_pred = knn.predict(X_test)` This will run inference on new input data by measuring distance of points in X_trn to each point in X_test.\n\nexample:\n\n```\nfrom kNearestNeighbors import kNearestNeighbor, accuracy\nfrom sklearn import datasets\nfrom sklearn.model_selection import train_test_split\nimport numpy as np\n\n# Load Iris Dataset\niris = datasets.load_iris()\nX = iris.data \ny = iris.target\n\n# For illustration purposes we will only be using the two features in the dataset\nfeature_idxs = [1, 3] # SET FEATURES BY INDEX <------------------\nfeature_names = ['Sepal Length', 'Sepal Width', 'Petal Length', 'Petal Width']\n\n# We will also split the dataset into training and testing so we can evaluate the kNN classifier\nX_trn_, X_test_, y_trn, y_test = train_test_split(X, \n y, \n test_size=0.333, \n random_state=0,\n stratify=y)\nX_trn, X_test = X_trn_[:, feature_idxs], X_test_[:, feature_idxs]\n\nprint(\"X_trn.shape = {}, X_test.shape = {}\".format(X_trn.shape, X_test.shape))\nprint(\"Features: {}, {}\".format(feature_names[feature_idxs[0]], feature_names[feature_idxs[1]]))\n\n# fit classifier\nk = 12\nknn = kNearestNeighbor(k=k)\nknn.fit(X_trn, y_trn, v=False)\ny_trn_pred = knn.predict(X_trn)\ntrn_acc = accuracy(y_trn_pred, y_trn)\ny_test_pred = knn.predict(X_test)\ntest_acc = accuracy(y_test_pred, y_test)\nprint('train accuracy: {}'.format(trn_acc))\nprint('test accuracy: {}'.format(test_acc))\n\n>> train accuracy: 0.97\n>> test accuracy: 0.94\n```\n\n\n## Author\n\nAndrei Mouraviev\n" } ]
2
Yetianyun/oPictures
https://github.com/Yetianyun/oPictures
8e0649fa5210e0870f30a642f0fc2d25568a7de9
55755c31ac5feb4f736d276a4c404c5747a93ad6
7ba76b7da7e93fc887730b0fee32508ccb18cca5
refs/heads/master
2020-11-25T22:56:01.251594
2020-01-18T08:29:27
2020-01-18T08:29:27
228,875,420
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.656812310218811, "alphanum_fraction": 0.7249357104301453, "avg_line_length": 26.785715103149414, "blob_id": "a484c58da467c8da2ad7407db300c768110b90ba", "content_id": "641333505ea6d95c9ed8594ffdadc6666a504127", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 880, "license_type": "no_license", "max_line_length": 78, "num_lines": 28, "path": "/8.py", "repo_name": "Yetianyun/oPictures", "src_encoding": "UTF-8", "text": "#去除水印\nimport cv2\nimport numpy as np\n# path = \"/Users/leaf/project/scanWords/testPic\" #记得不要有中文路径\nimg = cv2.imread(\"/Users/leaf/project/scanWords/testPic/2019-12-20-0002.jpg\" )\n# img = cv2.imread(path)\n \nheight,width = img.shape[0:2]\n# print(height,width)\n\n#开始操作\nthresh = cv2.inRange( img,np.array([0,0,0]),np.array([192,192,192]) )\nscan = np.ones( (3,3),np.uint8)\ncor = cv2.dilate(thresh,scan,iterations=1)\nspecular = cv2.inpaint(img,cor,5,flags=cv2.INPAINT_TELEA)\n#操作结束,下面开始是输出图片的代码\ncv2.namedWindow(\"image\",0)\ncv2.resizedWindow(\"image\",int(width/2),int(height/2))\ncv2.imshow(\"image\",img)\n\ncv2.namedWindow(\"modified\",0)\ncv2.resizeWindow(\"modified\",int(width/2),int(height/2))\ncv2.imshow(\"modified\",specular)\n\n# cv2.waitKey(0)\n# cv2.destroyAllWindows()\n\n# #代码纯手打,如果run不出优先检查错别字\n" }, { "alpha_fraction": 0.6286764740943909, "alphanum_fraction": 0.6593137383460999, "avg_line_length": 22.65217399597168, "blob_id": "df6f887e54bd93e8c009ec894f2b3845ea3c844f", "content_id": "91e6d78e1eb08ecbc370fb7389b1f5494996df1d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1782, "license_type": "no_license", "max_line_length": 75, "num_lines": 69, "path": "/20.py", "repo_name": "Yetianyun/oPictures", "src_encoding": "UTF-8", "text": "# _*_ coding:utf-8 _*_\n#从图片上扣一块下来。并且分析哪个图是哪张纸。\n__author__ = 'admin'\nfrom PIL import Image, ImageDraw, ImageFont\nimport cv2\nfrom aip import AipOcr\nimport re\nimport configparser\n\nfile_path = 'testPic/2019-12-20-0101.jpg'\nimg = cv2.imread(file_path)\nsp = img.shape\n\nsz1 = sp[0]#height(rows) of image\nsz2 = sp[1]#width(colums) of image\n\nzuoshangX = 0\nzuoshangY = sz1 * 0.93\nyouxiaX = sz2\nyouxiaY = sz1\n\nim = Image.open(file_path)\n# 设置抠图区域\nbox = (zuoshangX, zuoshangY, youxiaX, youxiaY)\n# 从图片上抠下此区域\nregion = im.crop(box)\nnewPic = \"testPic/pic_name.jpg\"\nregion.save(newPic, quality=95)\n\nconfig = configparser.ConfigParser()\nconfig.read(\"config/config.ini\", encoding=\"utf-8\")\nbaiduAppId = config.get(\"BaiduOCR\", \"appId\")\nbaiduApiKey = config.get(\"BaiduOCR\", \"apiKey\")\nbaiduSecretKey = config.get(\"BaiduOCR\", \"secretKey\")\n\nconfig = {\n 'appId': baiduAppId,\n 'apiKey': baiduApiKey,\n 'secretKey': baiduSecretKey\n}\n\n\nclient = AipOcr(**config)\n\ndef get_file_content(file):\n with open(file, 'rb') as fp:\n return fp.read()\n\ndef img_to_str(image_path):\n image = get_file_content(image_path)\n # 通用文字识别(可以根据需求进行更改)\n result = client.basicGeneral(image)\n return result\n\nif __name__ == '__main__':\n api_result = img_to_str(\"testPic/pic_name.jpg\")\n words_result = (i['words'] for i in api_result['words_result']) # 文本内容\n s = '\\n'.join(words_result) #\n # print(api_result)\n\n pattern1 = \"([一二三四五六七八九十])\"\n m1 = re.search(pattern1,s)\n dataNO1 = m1.group() #主数据\n print ( dataNO1 )\n\n pattern2 = \"([123456789])\"\n m2 = re.search(pattern2,s) #次数据\n dataNO2 = m2.group()\n print ( dataNO2 )\n" }, { "alpha_fraction": 0.5972222089767456, "alphanum_fraction": 
0.5972222089767456, "avg_line_length": 24.323530197143555, "blob_id": "01acd550a099b8faf1f759e4293bee2ef6a88df8", "content_id": "ab7099733b945870bb5b99a8f6c11cce89b9ca74", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 984, "license_type": "no_license", "max_line_length": 50, "num_lines": 34, "path": "/19.py", "repo_name": "Yetianyun/oPictures", "src_encoding": "UTF-8", "text": "#将数据写入数据库\n#使用pymysql连接mysql数据库\nimport pymysql\nimport configparser\n\ndef pyconn():\n    config = configparser.ConfigParser()\n    config.read('config/config.ini')\n    section = 'mysql'\n    conf = {\n        'host': config.get(section, 'host'),\n        'port': config.getint(section, 'port'),\n        'user': config.get(section, 'user'),\n        'passwd': config.get(section, 'password'),\n        'db': config.get(section, 'database'),\n        'charset': config.get(section, 'charset')\n    }\n    conn = pymysql.connect(**conf)\n\n    # 使用 cursor() 方法创建一个游标对象 cursor\n    cursor = conn.cursor()\n    # 使用 execute() 方法执行 SQL 查询\n    cursor.execute(\"select version()\")\n    # 使用 fetchone() 方法获取单条数据.\n    data = cursor.fetchone()\n    # 输出查询的数据:\n    print(\"Database Version: %s\" % data)\n    # 关闭数据库连接\n    conn.close()\n    return conn\n\n\nif __name__ == \"__main__\":\n    pyconn()\n\n\n\n" }, { "alpha_fraction": 0.7295825481414795, "alphanum_fraction": 0.7313974499702454, "avg_line_length": 20.230770111083984, "blob_id": "7a1254434403f07d7162553a013afec05482e53e", "content_id": "d84a380e0bf3fc22674ca4f2c5c0f6d24dc7df08", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 641, "license_type": "no_license", "max_line_length": 60, "num_lines": 26, "path": "/5.py", "repo_name": "Yetianyun/oPictures", "src_encoding": "UTF-8", "text": "# coding=utf-8\nimport configparser\nimport os\n\ncf = configparser.ConfigParser()\n\n# read(filename) 读文件内容\nfilename = cf.read(\"./config.ini\")\nprint(filename)\n\n# sections() 得到所有的section,以列表形式返回\nsec = cf.sections()\nprint(sec)\n\n# options(section) 得到section下的所有option\nopt = cf.options(sec[0])\nprint(opt)\n\n# # items 得到section的所有键值对\n# value = cf.items(\"driver\")\n# print(value)\n\n# # get(section,option) 得到section中的option值,返回string/int类型的结果\n# mysql_host = cf.get(\"mysql\",\"host\")\n# mysql_password = cf.getint(\"mysql\",\"password\")\n# print(mysql_host,mysql_password)" }, { "alpha_fraction": 0.4746716618537903, "alphanum_fraction": 0.5131332278251648, "avg_line_length": 30.80596923828125, "blob_id": "7d2461d2c6433798210571c98ad1a815842baac6", "content_id": "6e00de348f384aa251264ec7a3a8266d901d2525", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2248, "license_type": "no_license", "max_line_length": 102, "num_lines": 67, "path": "/25.py", "repo_name": "Yetianyun/oPictures", "src_encoding": "UTF-8", "text": "#对切分图片进行调整\nfrom PIL import Image\nimport os,cv2\nimport getAngle #abc\nimport numpy as t\n\ndef splitPic(source_path):\n    picNo = 0\n    for scan_file in os.listdir(source_path):\n        source_file_path = 'testPic/' + scan_file\n        angle = getAngle.get_angle(source_file_path)\n        print(angle)\n\n        img = Image.open(source_file_path)\n        #遇到需要图的,要先转一下。\n\n        info = img.size\n        width = info[0]\n        height = info[1]\n        getWidth = t.zeros(width)\n        for k in range(0,width):\n            getWidth[k] = 0\n        getWidth[0] += 1\n        img = cv2.imread(source_file_path)\n        # 查找页面中的分割线\n        startHeight = int(height * 0.2)\n        endHeight = int(height * 0.8) - 1\n        startWidth = int(width * 0.35)\n        endWidth = int(width * 0.7) - 1\n        step = 3\n        for row in 
range(startHeight,endHeight,step): # 图片的高\n for col in range(startWidth,endWidth,step): # 图片的宽\n channel1 = img[row][col][0]\n channel2 = img[row][col][1]\n channel3 = img[row][col][2]\n if ( ( channel1 >= 70 and channel1 <= 80 ) and\n ( channel2 >= 70 and channel2 <= 80 ) and\n ( channel3 >= 70 and channel3 <= 80 ) ):\n m = 1 #表示可能是黑色\n else:\n m = 0\n getWidth[col] += m\n # print(row, col, channel3)\n\n #求最大的col\n maxCol = 0\n for col in range(startWidth, endWidth, step): # 图片的宽\n if ( getWidth[col] > maxCol ):\n maxCol = getWidth[col]\n maxColValue = col\n\n img = Image.open(source_file_path)\n img.crop((0, 0, maxColValue - 18, 3496)).save('target_pic/target_s_a_'+str(picNo)+'.jpg')\n img.crop((maxColValue + 18, 0, 2472, 3496)).save('target_pic/target_s_a_'+str(picNo+1)+'.jpg')\n picNo += 2\n\nif __name__ == '__main__':\n #引用测试\n source_file_path = \"testPic/2019-12-20-0101.jpg\"\n angle = getAngle.get_angle(source_file_path)\n print(angle)\n # source_path = \"testPic/\"\n #\n # print(angle)\n #\n #\n # splitPic(source_path)\n\n" }, { "alpha_fraction": 0.561998188495636, "alphanum_fraction": 0.6333630681037903, "avg_line_length": 27, "blob_id": "c415b54e07ddc20572fdd3b369cb4d0fbcc9fddf", "content_id": "a05d9f2d268f904610735fe6671ae3268b7eeea0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1141, "license_type": "no_license", "max_line_length": 83, "num_lines": 40, "path": "/9.py", "repo_name": "Yetianyun/oPictures", "src_encoding": "UTF-8", "text": "#水平校正图片\n\nimport os\nimport cv2\nimport math\nimport random\nimport numpy as np\nfrom scipy import misc, ndimage\n\nfilepath = '/Users/leaf/project/scanWords/testPic/'\nfor filename in os.listdir(filepath):\n\tprint(filename)\n\timg = cv2.imread('/Users/leaf/project/scanWords/testPic/%s'%filename)\n\t# print(img)\n\tgray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)\n\t# edges = cv2.Canny(gray,50,150,apertureSize = 3)\n\t#霍夫变换\n\t# lines = cv2.HoughLines(edges,1,np.pi/180,0)\n\t\n\t# for rho,theta in lines[0]:\n\t# a = np.cos(theta)\n\t# b = np.sin(theta)\n\t# x0 = a*rho\n\t# y0 = b*rho\n\t# x1 = int(x0 + 1000*(-b))\n\t# y1 = int(y0 + 1000*(a))\n\t# x2 = int(x0 - 1000*(-b))\n\t# y2 = int(y0 - 1000*(a))\n\t# if x1 == x2 or y1 == y2:\n\t# \tcontinue\n\t# t = float(y2-y1)/(x2-x1)\n\n\t# # rotate_angle = math.degrees(math.atan(t))\n\t# rotate_angle = 0.006277944427853939\n\t# # if rotate_angle > 45:\n\t# # \trotate_angle = -90 + rotate_angle\n\t# # elif rotate_angle < -45:\n\t# # \trotate_angle = 90 + rotate_angle\n\t# rotate_img = ndimage.rotate(img, rotate_angle)\n\t# misc.imsave('/Users/leaf/project/scanWords/testPic/pic_%s'%filename, rotate_img)\n " }, { "alpha_fraction": 0.41067415475845337, "alphanum_fraction": 0.4219101071357727, "avg_line_length": 27.238094329833984, "blob_id": "f6050ada9d75fb35c4468ef748bc19189172c052", "content_id": "ec7379228b9ed7d91f16586b35ec83c52aedc9d4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1974, "license_type": "no_license", "max_line_length": 173, "num_lines": 63, "path": "/opreateContent.py", "repo_name": "Yetianyun/oPictures", "src_encoding": "UTF-8", "text": "#去重\ndef qch(zf):\n zf.replace(\")\",\" \")\n zf.replace(\"(\",\" \")\n zf = zf.strip()\n l = len(zf)\n for i in range(2,l):\n if ( l % i == 0 ):\n m = 0\n jzf = zf[0:i]\n for j in range(0,l-1,i):\n xzf = zf[j:j+i]\n if ( xzf != jzf ):\n m = 1\n break\n if ( m == 0 ):\n return xzf\n return zf\n\n#处理某网站的数据\ndef 
operateText(source_file,target_file):\n fo = open(target_file,\"w\")\n textList = ['题目提供者','应用','题库','训练','比赛','记录','题目列表','提交','通过','时间限制','内存限制','历史分数','普及组','提交','通过','时间','内存','题目','难度','提交','查看','标签','进入','相关','推荐','展开','说明','初始','数据']\n file = open(source_file)\n charList = ['+','>','<','/']\n yw = \"\"\n for line in file:\n zz = line[0:2]\n if ( zz[0:1] == \"P\" or zz in textList or zz == \"\\n\"):\n continue\n detail_line = []\n pos = 0\n if ( line.find(\"for\") > 0):\n print(line)\n continue\n\n fenHao = line.split(\";\")\n line = \";\\n\".join(fenHao)\n splitContent = line.split(\"//\")\n line = splitContent[0]\n for detail in line:\n pos += 1\n #判断是否是汉字\n if ( detail in charList or \"\"'\\u4e00' <= detail <= '\\u9fff' or detail == \",\" or detail ==\"。\"):\n hd = qch(yw)\n detail = qch(hd) + detail\n detail_line.append(detail)\n yw = \"\"\n else :\n yw += detail\n\n now_line = ''.join(detail_line)\n fo.write(now_line)\n\n fo.close()\n file.close()\n\nif __name__ == '__main__':\n # 引用测试\n source_file = \"text/source.txt\"\n target_file = \"text/target.txt\"\n operateText(source_file,target_file)\n print(\"处理完成\")\n\n" }, { "alpha_fraction": 0.6627118587493896, "alphanum_fraction": 0.7220339179039001, "avg_line_length": 21.730770111083984, "blob_id": "bbfc298f0a0a2dc0b2087a6ef3c6c5902fc7d50b", "content_id": "a43b219fa466061144b7967b4fc5d8116bf07106", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 614, "license_type": "no_license", "max_line_length": 57, "num_lines": 26, "path": "/1.py", "repo_name": "Yetianyun/oPictures", "src_encoding": "UTF-8", "text": "from skimage import data, exposure, img_as_float,io\nimport matplotlib.pyplot as plt\n\n#调整图片的对比度\nimage = io.imread('./sourceImages/WX20191218-224733.png')\n# image = img_as_float(data.moon())\ngam1= exposure.adjust_gamma(image, 2) #调暗\ngam2= exposure.adjust_gamma(image, 0.5) #调亮\nplt.figure('adjust_gamma',figsize=(8,8))\n\nplt.subplot(131)\nplt.title('origin image')\nplt.imshow(image,plt.cm.gray)\nplt.axis('off')\n\nplt.subplot(132)\nplt.title('gamma=2')\nplt.imshow(gam1,plt.cm.gray)\nplt.axis('off')\n\nplt.subplot(133)\nplt.title('gamma=0.5')\nplt.imshow(gam2,plt.cm.gray)\nplt.axis('off')\n\nplt.show()" }, { "alpha_fraction": 0.5105352401733398, "alphanum_fraction": 0.5388714075088501, "avg_line_length": 30.769229888916016, "blob_id": "e42db2957df9ce70a6245ed64832066db9020c07", "content_id": "cb675bea98a8e0f46abee528079ae61d992ed394", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4465, "license_type": "no_license", "max_line_length": 114, "num_lines": 130, "path": "/23.py", "repo_name": "Yetianyun/oPictures", "src_encoding": "UTF-8", "text": "# _*_ coding:utf-8 _*_\n#将数据存入数据库,本代码是处理\"答案\"的\n__author__ = 'admin'\nfrom PIL import Image, ImageDraw, ImageFont\nimport cv2\nfrom aip import AipOcr\nimport re\nimport os\nimport pymysql\nimport configparser\n\nconfig = configparser.ConfigParser()\nconfig.read(\"config/config.ini\", encoding=\"utf-8\")\nbaiduAppId = config.get(\"BaiduOCR\", \"appId\")\nbaiduApiKey = config.get(\"BaiduOCR\", \"apiKey\")\nbaiduSecretKey = config.get(\"BaiduOCR\", \"secretKey\")\n\nconfig = {\n 'appId': baiduAppId,\n 'apiKey': baiduApiKey,\n 'secretKey': baiduSecretKey\n}\n\nclient = AipOcr(**config)\ndef get_file_content(file):\n with open(file, 'rb') as fp:\n return fp.read()\n\ndef img_to_str(image_path):\n image = get_file_content(image_path)\n # 通用文字识别(可以根据需求进行更改)\n result = 
client.basicGeneral(image)\n return result\n\n# if __name__ == '__main__':\n#source_path 源文件目录\ndef read_source(source_path):\n num_dict = {\"一\": \"1\", \"二\": \"2\", \"三\": \"3\", \"四\": \"4\", \"五\": \"5\", \"六\": \"6\", \"七\": \"7\", \"八\": \"8\", \"九\": \"9\", \"十\": \"\"}\n config = configparser.ConfigParser()\n config.read('config/config.ini')\n section = 'mysql'\n conf = {\n 'host': config.get(section, 'host'),\n 'port': config.getint(section, 'port'),\n 'user': config.get(section, 'user'),\n 'passwd': config.get(section, 'password'),\n 'db': config.get(section, 'database'),\n 'charset': config.get(section, 'charset')\n }\n conn = pymysql.connect(**conf)\n sql_insert = \"\"\"insert into scan_file_info(pic_name, main_type, sub_type,is_answer) values (%s,%s,%s,%s)\"\"\"\n\n for scan_file in os.listdir(source_path):\n print(\"scan_file:\"+scan_file)\n source_file_path = 'testPic/'+scan_file\n img = cv2.imread(source_file_path)\n sp = img.shape\n\n sz1 = sp[0] # height(rows) of image\n sz2 = sp[1] # width(colums) of image\n\n zuoshangX = 0\n zuoshangY = sz1 * 0.93 #高度\n youxiaX = sz2\n youxiaY = sz1\n\n im = Image.open(source_file_path)\n # 设置抠图区域\n box = (zuoshangX, zuoshangY, youxiaX, youxiaY)\n region = im.crop(box)\n newPic = \"target_pic/pic_name.jpg\"\n region.save(newPic, quality=95)\n\n api_result = img_to_str(\"target_pic/pic_name.jpg\")\n words_result = (i['words'] for i in api_result['words_result']) # 文本内容\n s = '\\n'.join(words_result) #\n print(s)\n\n # pattern0 = \"([答案])\"\n # m0 = re.search(pattern0, s)\n if ( \"答案\" in s ) :\n # print(\"答案\")\n # return\n pattern1 = \"([一二三四五六七八九十])\"\n m1 = re.search(pattern1, s)\n sub_type = num_dict[m1.group()] # 次数据\n\n pattern2 = \"([123456789])\"\n m2 = re.search(pattern2, s) # 主数据\n main_type = m2.group()\n is_answer = 1 #是答案\n print(sub_type,main_type)\n\n #左右分离图片\n im = Image.open(source_file_path)\n im.crop((0, 0,2472/2, 3496)).save('target_pic/target_a_1.jpg')\n im.crop((2472/2+1, 0,2472, 3496)).save('target_pic/target_a_2.jpg')\n\n width = img.shape[1]\n channels = img.shape[2]\n #查找页面中的分割线\n for row in range(img.shape[0]): # 图片的高\n for col in range(img.shape[1]): # 图片的宽\n pixel = 0\n for channel in range(channels):\n pixel = pixel * 1000 + img[row][col][channel]\n pixel = int(pixel / 10000000) * 1000000\n print(row,col,pixel)\n print(\"\\n\")\n #此功能没有完成,可以划一个部分然后再寻找\n\n else:\n pattern1 = \"([一二三四五六七八九十])\"\n m1 = re.search(pattern1, s)\n main_type = num_dict[m1.group()] # 主数据\n\n pattern2 = \"([123456789])\"\n m2 = re.search(pattern2,s) #次数据\n sub_type = m2.group()\n is_answer = 0 #不是答案\n\n #在这里把\"图片名称\"及\"图片序号\"写进去。\n cursor = conn.cursor()\n cursor.execute(sql_insert, (scan_file, main_type, sub_type, is_answer))\n cursor.close()\n conn.commit()\n\nif __name__ == '__main__':\n source_path = \"testPic/\"\n read_source(source_path)" }, { "alpha_fraction": 0.5897436141967773, "alphanum_fraction": 0.5897436141967773, "avg_line_length": 20.33333396911621, "blob_id": "db697d552409c5a7df67cf87418c106217743d3e", "content_id": "26378c30b20fe7c36df3731d103238c52107cd81", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 227, "license_type": "no_license", "max_line_length": 35, "num_lines": 9, "path": "/getContent.py", "repo_name": "Yetianyun/oPictures", "src_encoding": "UTF-8", "text": "\n\n#使用另一种方法,获得某网站的内容\ndef getContent(url):\n print(url)\n\nif __name__ == '__main__':\n source_file = \"text/source.txt\"\n file = open(source_file)\n for url in file:\n getContent(url)\n\n" 
}, { "alpha_fraction": 0.5944055914878845, "alphanum_fraction": 0.6048951148986816, "avg_line_length": 30.021739959716797, "blob_id": "9d1a616d11c7546d3a9bd80766c44f45174d3a0f", "content_id": "dae0d0d06530f8ec2eed5354e18add9c957845aa", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1622, "license_type": "no_license", "max_line_length": 79, "num_lines": 46, "path": "/10.py", "repo_name": "Yetianyun/oPictures", "src_encoding": "UTF-8", "text": "# #获得图像的每个像素的值\n# import os\n# import cv2\n# import numpy as np\n\n# # np.set_printoptions(threshold=np.nan) # 这里多加一行代码,避免控制台输出省略号的问题\n# \u001b\n# filepath = '/Users/leaf/project/scanWords/testPic/'\n# for filename in os.listdir(filepath):\n#     print(filename)\n#     # img = cv2.imread('/Users/leaf/project/scanWords/testPic/%s'%filename)\n#     # print(img.shape[0]+\"////\"+img.shape[1])\n#     # for x in range(img.shape[0]):  # 图片的高\n#     #     for y in range(img.shape[1]):  # 图片的宽\n#     #         px = img[x,y]\n#     #         print(px)  # 这样就能得到每个点的bgr值\n\n\nimport os\nimport cv2\nimport math\nimport random\nimport numpy as np\nfrom scipy import misc, ndimage\n\n\ndef access_pixels(frame):\n    print(frame.shape)  #shape内包含三个元素:按顺序为高、宽、通道数\n    height = frame.shape[0]\n    weight = frame.shape[1]\n    channels = frame.shape[2]\n    print(\"weight : %s, height : %s, channel : %s\" %(weight, height, channels))\n    \n    for row in range(height):  #遍历高\n        for col in range(weight):  #遍历宽\n            for c in range(channels):  #便利通道\n                pv = frame[row, col, c] \n                frame[row, col, c] = 255 - pv  #全部像素取反,实现一个反向效果\n    cv2.imshow(\"fanxiang\", frame)\n    \nfilepath = '/Users/leaf/project/scanWords/testPic/'\nfor filename in os.listdir(filepath):\n    src = cv2.imread('/Users/leaf/project/scanWords/testPic/%s'%filename)\n    cv2.imshow(\"Picture\", src)\n    access_pixels(src)\n    cv2.waitKey(0)\n\n\n\n" }, { "alpha_fraction": 0.6160164475440979, "alphanum_fraction": 0.6160164475440979, "avg_line_length": 18.520000457763672, "blob_id": "17f18464a37cc83fe39ff24d595e8a4d5f5be67f", "content_id": "f0f0611afdad9f9c37d04e0ad859526125e32a30", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 537, "license_type": "no_license", "max_line_length": 49, "num_lines": 25, "path": "/3.py", "repo_name": "Yetianyun/oPictures", "src_encoding": "UTF-8", "text": "#基于百度ocr的测试\nfrom aip import AipOcr\nimport configparser\n\nconfig = {\n    'appId': '',\n    'apiKey': '',\n    'secretKey': ''\n}\n \nclient = AipOcr(**config)\n \ndef get_file_content(file):\n    with open(file, 'rb') as fp:\n        return fp.read()\n \ndef img_to_str(image_path):\n    image = get_file_content(image_path)\n    # 通用文字识别(可以根据需求进行更改)\n    result = client.basicGeneral(image)\n    return result\n\nif __name__ == '__main__':\n    text = img_to_str('sourceImages/testOcr.png')\n    print(text)" }, { "alpha_fraction": 0.5313678979873657, "alphanum_fraction": 0.6000000238418579, "avg_line_length": 31.623077392578125, "blob_id": "d48a18e0a781769af67922fd92efaa6e7c2ee1de", "content_id": "36b78d7451d7a7a2f9d26507645ba54875284db8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5532, "license_type": "no_license", "max_line_length": 72, "num_lines": 130, "path": "/11.py", "repo_name": "Yetianyun/oPictures", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\nimport numpy as np\nimport cv2 #或者用import cv2(由于库的安装方式不同)\nimport math\n\ndef access_pixels1(img):\n    \"\"\"遍历图像每个像素的每个通道\"\"\"\n    # print(img.shape) #打印图像的高,宽,通道数(返回一个3元素的tuple)\n\n    height = img.shape[0] 
#将tuple中的元素取出,赋值给height,width,channels\n width = img.shape[1]\n channels = img.shape[2]\n\n height -= 5\n width = int( width / 100 ) * 100 # 2400 #int(width)\n # print(\"height:%s,width:%s,channels:%s\" % (height,width,channels))\n oldPixelColor = 0\n\n pixelColor = 0\n #取得右上角的点的坐标的RGB的值\n # height = 20\n # width -= 10\n # for channel in range(channels): #遍历每个通道(三个通道分别是BGR)\n # pixelColor = pixelColor * 1000 + img[height][width][channel]\n # return (0,pixelColor)\n # 190190178 --> 右上角的,没有内容的值是这个数字 (10,width - 20)\n # 187188178 --> 右上角的这个数字是(10,width - 10)\n oldPixelColor = 187188178\n\n # for row in range(10,height,5000): #遍历每一行\n row = 15\n for col in range(width,0,-10): #遍历每一列\n pixelColor = 0 ; # 计算RGB的值\n for channel in range(channels): #遍历每个通道(三个通道分别是BGR)\n pixelColor = pixelColor * 1000 + img[row][col][channel]\n # minColor = abs ( pixelColor - oldPixelColor ) \n # print(pixelColor)\n if ( pixelColor > 250250250) :\n return(row,col)\n # oldPixelColor = pixelColor\n # if ( minColor > 20000000 and col != width) : \n # endRow = row\n # endCol = col\n # return (endRow,endCol)\n # print(pixelColor)\n print(\"\\n\")\n return(row,col)\n # img[row][col][channel] = 255 - img[row][col][channel] \n #通过数组索引访问该元素,并作出处理\n # cv2.imshow(\"processed img\",img) #将处理后的图像显示出来\n\n\n\ndef access_pixels2(img):\n \"\"\"遍历图像每个像素的每个通道\"\"\"\n # print(img.shape) #打印图像的高,宽,通道数(返回一个3元素的tuple)\n\n height = img.shape[0] #将tuple中的元素取出,赋值给height,width,channels\n width = img.shape[1]\n channels = img.shape[2]\n\n height -= 5\n width = int( width / 100 ) * 100 # 2400 #int(width)\n # print(\"height:%s,width:%s,channels:%s\" % (height,width,channels))\n oldPixelColor = 0\n\n pixelColor = 0\n oldPixelColor = 187188178\n\n # for row in range(10,height,5000): #遍历每一行\n col = width - 5 #行\n #col ->height = 3496\n #row ->width = 2472 \n #row = 行,col = 列\n for row in range(0,200,1): #遍历每一列\n pixelColor = 0 ; # 计算RGB的值\n for channel in range(channels): #遍历每个通道(三个通道分别是BGR)\n pixelColor = pixelColor * 1000 + img[row][col][channel]\n # minColor = abs ( pixelColor - oldPixelColor ) \n # print(row,col,pixelColor)\n if ( pixelColor > 239000000):\n return(row,col)\n # print(row,col,pixelColor)\n print(\"\\n\")\n # img[row][col][channel] = 255 - img[row][col][channel] \n #通过数组索引访问该元素,并作出处理\n # cv2.imshow(\"processed img\",img) #将处理后的图像显示出来\n \n#上述自定义函数的功能是像素取反,当然,opencv自带像素取反方法bitwise_not(),不需要这么麻烦\ndef inverse(img):\n \"\"\"此函数与access_pixels函数功能一样\"\"\"\n dst = cv2.bitwise_not(img)\n cv2.imshow(\"inversed_img\",dst)\n \n \ndef create_img():\n \"\"\"#创建一张三通道图像\"\"\"\n img = np.zeros([600,800,3],dtype=np.uint8)\n #创建高600像素,宽800像素,每个像素有BGR三通道的数组(图像)\n #由于元素都在0~255之内,规定数组元素类型为uint8已足够\n img[:,:,2] = np.ones([600,800])*255\n #img[:,:,2]是一种切片方式,冒号表示该维度从头到尾全部切片取出\n #所以img[:,:,2]表示切片取出所有行,所有列的第三个通道(索引为2)\n #右侧首先创建了一个600X800的二维数组,所有元素初始化为1,再乘上255,即所有元素变为255\n #注意右侧600X800的二维数组与左侧切片部分形状相同,所以可以赋值\n #即所有行,所有列的第三个通道(R)的值都变为255,一二通道(BG)仍为0,即所有像素变为红色BGR(0,0,255)\n cv2.imshow(\"created_img\",img)\n \n \ndef create_img_1():\n \"\"\"创建一张单通道图像\"\"\" \n img = np.zeros([400,400,1],dtype=np.uint8)\n #高400像素,宽400像素,单通道\n #仍是三维数组,不过第三个维度长度为1,用来表示像素的灰度(0~255)\n img[:,:,0] = np.ones([400,400])*127\n #切片取出所有行所有列的第一个元素(索引为0),灰度元素,并赋值为127\n cv2.imshow(\"created_img1\",img)\n \ndef get_angle(source_file):\n src = cv2.imread(source_file) #读取图像\n t1 = cv2.getTickCount() #记录下起始时刻\n y1,x1 = access_pixels1(src) #访问图像的每个元素并处理\n y2,x2 = access_pixels2(src) #访问图像的每个元素并处理\n angle = math.atan( abs(y2-y1)/abs(x2-x1)) ;\n return angle\n\nif __name__ == '__main__':\n 
source_path = \"testPic/2019-12-20-0101.jpg\"\n pic_angle = get_angle(source_path)\n print(pic_angle)" }, { "alpha_fraction": 0.5622895359992981, "alphanum_fraction": 0.7171717286109924, "avg_line_length": 28.600000381469727, "blob_id": "dc3dd249b424572f20a3317f990435caeb21571f", "content_id": "8b7a61b9ea5b19d716fedb7b5230f0006d865096", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 315, "license_type": "no_license", "max_line_length": 52, "num_lines": 10, "path": "/17.py", "repo_name": "Yetianyun/oPictures", "src_encoding": "UTF-8", "text": "#正确的进行旋转角度\nfrom PIL import Image \nfrom numpy import * \n \nfile_path = 'testPic/2019-12-20-0101.jpg'\npil_im = Image.open(file_path) \nsource_route = 0.006277944427853939 * 360\npil_im = pil_im.rotate(source_route) \nnew_file_path = 'testPic/target_2019-12-20-0101.jpg'\npil_im.save(new_file_path)\n\n" }, { "alpha_fraction": 0.4677419364452362, "alphanum_fraction": 0.5645161271095276, "avg_line_length": 19.5, "blob_id": "dfbd46bdbf945b72cbc4bb55603d35b9bbd0444e", "content_id": "8380b75623b9eb942591a38043ae57b639adb553", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 134, "license_type": "no_license", "max_line_length": 38, "num_lines": 6, "path": "/13.py", "repo_name": "Yetianyun/oPictures", "src_encoding": "UTF-8", "text": "width = 100\n# for col in range(width,0,-1): #遍历每一列\n # print(col)\nlista = [1,2,4,5]\nfor i in range(5,2,-1):\n print(i) " }, { "alpha_fraction": 0.6973684430122375, "alphanum_fraction": 0.7105262875556946, "avg_line_length": 29.399999618530273, "blob_id": "63d28ce553a7b73668fa68a2cff6a0be809a3938", "content_id": "2c433335f453e852e983ecdd0e0cb5edce8e73cf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 152, "license_type": "no_license", "max_line_length": 45, "num_lines": 5, "path": "/bin/django-admin.py", "repo_name": "Yetianyun/oPictures", "src_encoding": "UTF-8", "text": "#!/Users/leaf/project/scanWords/bin/python3.6\nfrom django.core import management\n\nif __name__ == \"__main__\":\n management.execute_from_command_line()\n" }, { "alpha_fraction": 0.3001987934112549, "alphanum_fraction": 0.31610336899757385, "avg_line_length": 19.95833396911621, "blob_id": "a90fb761082b630e25f3a0c1da4f6839a292d4be", "content_id": "1bdd8b337c15785346d16d07b5e1d34e5f037ef9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 507, "license_type": "no_license", "max_line_length": 36, "num_lines": 24, "path": "/zf.py", "repo_name": "Yetianyun/oPictures", "src_encoding": "UTF-8", "text": "#去重\ndef qch(zf):\n zf.replace(\")\",\"\")\n zf.replace(\"(\",\"\")\n zf = zf.strip()\n l = len(zf)\n for i in range(2,l):\n if ( l % i == 0 ):\n m = 0\n jzf = zf[0:i]\n for j in range(0,l-1,i):\n xzf = zf[j:j+i]\n if ( xzf != jzf ):\n m = 1\n break\n if ( m == 0 ):\n return xzf\n\n print(\"\\n\")\n\nif __name__ == '__main__':\n zf = 'WLWLWL'\n tl = qch(zf)\n print(tl)\n" }, { "alpha_fraction": 0.6435331106185913, "alphanum_fraction": 0.6466876864433289, "avg_line_length": 18.875, "blob_id": "1166ab78881909d5479a291afc3646c646f284f3", "content_id": "33b055d41deff3e74bf4382b2b089e37cfeb17c5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 337, "license_type": "no_license", "max_line_length": 53, "num_lines": 16, "path": "/6.py", "repo_name": "Yetianyun/oPictures", 
"src_encoding": "UTF-8", "text": "# coding:utf-8\nfrom flask import Flask\nimport config\nfrom app import app\n\napp = Flask(__name__)\napp.config.from_object(config.py)\n\n# 通过配置文件加载配置\[email protected](\"/\")\ndef index():\n # appid_content = current_app.config.get(\"appId\")\n return \"hello flask\" + app.config['appId']\n \nif __name__ == '__main__':\n app.run()" }, { "alpha_fraction": 0.5868932008743286, "alphanum_fraction": 0.6053398251533508, "avg_line_length": 24.762500762939453, "blob_id": "edd3cd9933b3349ea5e36e44657940b2fd5334fb", "content_id": "b922cbcb9409d5f8d4f922016e1a3ff6469d5c61", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2220, "license_type": "no_license", "max_line_length": 79, "num_lines": 80, "path": "/21.py", "repo_name": "Yetianyun/oPictures", "src_encoding": "UTF-8", "text": "# _*_ coding:utf-8 _*_\n#从图片上扣一块下来。并且分析哪个图是哪张纸。\n__author__ = 'admin'\nfrom PIL import Image, ImageDraw, ImageFont\nimport cv2\nfrom aip import AipOcr\nimport re\nimport configparser\nimport os\nimport pymysql\nimport configparser\n\n\nconfig = configparser.ConfigParser()\nconfig.read(\"config/config.ini\", encoding=\"utf-8\")\nbaiduAppId = config.get(\"BaiduOCR\", \"appId\")\nbaiduApiKey = config.get(\"BaiduOCR\", \"apiKey\")\nbaiduSecretKey = config.get(\"BaiduOCR\", \"secretKey\")\n\nconfig = {\n 'appId': baiduAppId,\n 'apiKey': baiduApiKey,\n 'secretKey': baiduSecretKey\n}\n\n\nclient = AipOcr(**config)\n\ndef get_file_content(file):\n with open(file, 'rb') as fp:\n return fp.read()\n\ndef img_to_str(image_path):\n image = get_file_content(image_path)\n # 通用文字识别(可以根据需求进行更改)\n result = client.basicGeneral(image)\n return result\n\n# if __name__ == '__main__':\n#source_path 源文件目录\ndef read_source(source_path):\n for scan_file in os.listdir(source_path):\n source_file_path = 'testPic/'+scan_file\n img = cv2.imread(source_file_path)\n sp = img.shape\n\n sz1 = sp[0] # height(rows) of image\n sz2 = sp[1] # width(colums) of image\n\n zuoshangX = 0\n zuoshangY = sz1 * 0.93\n youxiaX = sz2\n youxiaY = sz1\n\n im = Image.open(source_file_path)\n # 设置抠图区域\n box = (zuoshangX, zuoshangY, youxiaX, youxiaY)\n # 从图片上抠下此区域\n region = im.crop(box)\n newPic = \"target_pic/pic_name.jpg\"\n region.save(newPic, quality=95)\n\n api_result = img_to_str(\"target_pic/pic_name.jpg\")\n words_result = (i['words'] for i in api_result['words_result']) # 文本内容\n s = '\\n'.join(words_result) #\n # print(api_result)\n\n pattern1 = \"([一二三四五六七八九十])\"\n m1 = re.search(pattern1,s)\n dataNO1 = m1.group() #主数据\n print ( dataNO1 )\n\n pattern2 = \"([123456789])\"\n m2 = re.search(pattern2,s) #次数据\n dataNO2 = m2.group()\n print ( dataNO2 )\n\nif __name__ == '__main__':\n source_path = \"testPic/\"\n read_source(source_path)" }, { "alpha_fraction": 0.5728517770767212, "alphanum_fraction": 0.5990037322044373, "avg_line_length": 26.724138259887695, "blob_id": "cdeb3cb4e7ded09d73cb60364bc181ecdcd25b48", "content_id": "b1ec7d0c72b4be1266a0743a35019aad2c6b0474", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 829, "license_type": "no_license", "max_line_length": 81, "num_lines": 29, "path": "/2.py", "repo_name": "Yetianyun/oPictures", "src_encoding": "UTF-8", "text": "#分割图片\nfrom PIL import Image\n\ndef cut_image(image,count):\n width, height = image.size\n item_width = int(width / count) \n item_height = int(height / count)\n box_list = []\n # (left, upper, right, lower)\n for i in range(0,count):\n for j in 
range(0,count):\n box = (j*item_width,i*item_height,(j+1)*item_width,(i+1)*item_height)\n box_list.append(box)\n image_list = [image.crop(box) for box in box_list]\n return image_list\n\n#保存\ndef save_images(image_list):\n index = 1\n for image in image_list:\n image.save(str(index) + '.png', 'PNG')\n index += 1\n\nif __name__ == '__main__':\n file_path = \"./sourceImages/WX20191218-224733.png\" #图片保存的地址\n image = Image.open(file_path)\n\n image_list = cut_image(image,2)\n save_images(image_list)" }, { "alpha_fraction": 0.5320388078689575, "alphanum_fraction": 0.566990315914154, "avg_line_length": 25.41025733947754, "blob_id": "901eff6fb46c4ccd2bfa4dfd8ef1f3c1ca1f5bae", "content_id": "bf428b581fe93f6f4dbdc5707a9cc3618c5acbc0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1170, "license_type": "no_license", "max_line_length": 96, "num_lines": 39, "path": "/16.py", "repo_name": "Yetianyun/oPictures", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\nimport cv2\nimport numpy as np\n\nimagepath = 'sourcePic.png'\nimg = cv2.imread(imagepath)\nimage, contours, _ = cv2.findContours(img, 2, 2)\n\nfor cnt in contours:\n\n # 最小外界矩形的宽度和高度\n width, height = cv2.minAreaRect(cnt)[1]\n\n if width* height > 100:\n # 最小的外接矩形\n rect = cv2.minAreaRect(cnt)\n box = cv2.boxPoints(rect) # 获取最小外接矩形的4个顶点\n box = np.int0(box)\n\n if 0 not in box.ravel():\n\n '''绘制最小外界矩形\n for i in range(4):\n cv2.line(image, tuple(box[i]), tuple(box[(i+1)%4]), 0) # 5\n '''\n # 旋转角度\n theta = cv2.minAreaRect(cnt)[2]\n if abs(theta) <= 45:\n print('图片的旋转角度为%s.'%theta)\n angle = theta\n\n# 仿射变换,对图片旋转angle角度\nh, w = img.shape\ncenter = (w//2, h//2)\nM = cv2.getRotationMatrix2D(center, angle, 1.0)\nrotated = cv2.warpAffine(img, M, (w, h), flags=cv2.INTER_CUBIC, borderMode=cv2.BORDER_REPLICATE)\n\n# 保存旋转后的图片\ncv2.imwrite('F://CHN_Char/after_rotated.png', rotated)\n" }, { "alpha_fraction": 0.5709767937660217, "alphanum_fraction": 0.5860857367515564, "avg_line_length": 27.75757598876953, "blob_id": "0c4cbca7d308c260f50cb228dbe83190fac422a5", "content_id": "5878f92c604fcb601ac2437fc466a29c4cc45ca7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3052, "license_type": "no_license", "max_line_length": 114, "num_lines": 99, "path": "/22.py", "repo_name": "Yetianyun/oPictures", "src_encoding": "UTF-8", "text": "# _*_ coding:utf-8 _*_\n#将数据存入数据库,本代码是处理\"题目\"的\n__author__ = 'admin'\nfrom PIL import Image, ImageDraw, ImageFont\nimport cv2\nfrom aip import AipOcr\nimport re\nimport configparser\nimport os\nimport pymysql\nimport configparser\n\nconfig = configparser.ConfigParser()\nconfig.read(\"config/config.ini\", encoding=\"utf-8\")\nbaiduAppId = config.get(\"BaiduOCR\", \"appId\")\nbaiduApiKey = config.get(\"BaiduOCR\", \"apiKey\")\nbaiduSecretKey = config.get(\"BaiduOCR\", \"secretKey\")\n\nconfig = {\n 'appId': baiduAppId,\n 'apiKey': baiduApiKey,\n 'secretKey': baiduSecretKey\n}\n\n\nclient = AipOcr(**config)\n\ndef get_file_content(file):\n with open(file, 'rb') as fp:\n return fp.read()\n\ndef img_to_str(image_path):\n image = get_file_content(image_path)\n # 通用文字识别(可以根据需求进行更改)\n result = client.basicGeneral(image)\n return result\n\n# if __name__ == '__main__':\n#source_path 源文件目录\ndef read_source(source_path):\n num_dict = {\"一\": \"1\", \"二\": \"2\", \"三\": \"3\", \"四\": \"4\", \"五\": \"5\", \"六\": \"6\", \"七\": \"7\", \"八\": \"8\", \"九\": \"9\", \"十\": \"\"}\n config = configparser.ConfigParser()\n 
config.read('config/config.ini')\n section = 'mysql'\n conf = {\n 'host': config.get(section, 'host'),\n 'port': config.getint(section, 'port'),\n 'user': config.get(section, 'user'),\n 'passwd': config.get(section, 'password'),\n 'db': config.get(section, 'database'),\n 'charset': config.get(section, 'charset')\n }\n conn = pymysql.connect(**conf)\n sql_insert = \"\"\"insert into scan_file_info(pic_name, main_type, sub_type) values (%s,%s,%s)\"\"\"\n\n for scan_file in os.listdir(source_path):\n source_file_path = 'testPic/'+scan_file\n img = cv2.imread(source_file_path)\n sp = img.shape\n\n sz1 = sp[0] # height(rows) of image\n sz2 = sp[1] # width(colums) of image\n\n zuoshangX = 0\n zuoshangY = sz1 * 0.93\n youxiaX = sz2\n youxiaY = sz1\n\n im = Image.open(source_file_path)\n # 设置抠图区域\n box = (zuoshangX, zuoshangY, youxiaX, youxiaY)\n # 从图片上抠下此区域\n region = im.crop(box)\n newPic = \"target_pic/pic_name.jpg\"\n region.save(newPic, quality=95)\n\n api_result = img_to_str(\"target_pic/pic_name.jpg\")\n words_result = (i['words'] for i in api_result['words_result']) # 文本内容\n s = '\\n'.join(words_result) #\n # print(api_result)\n\n pattern1 = \"([一二三四五六七八九十])\"\n m1 = re.search(pattern1, s)\n main_type = num_dict[m1.group()] # 主数据\n\n pattern2 = \"([123456789])\"\n m2 = re.search(pattern2,s) #次数据\n sub_type = m2.group()\n\n #在这里把\"图片名称\"及\"图片序号\"写进去。\n cursor = conn.cursor()\n cursor.execute(sql_insert, (scan_file, main_type, sub_type))\n\n cursor.close()\n conn.commit()\n\nif __name__ == '__main__':\n source_path = \"testPic/\"\n read_source(source_path)" } ]
22
mrhm2000/Self-Driving-Car-Semantic-Segmentation
https://github.com/mrhm2000/Self-Driving-Car-Semantic-Segmentation
9df629146d7d54f14360bd933880a21037e45159
70e5207818286b7f5ab70f18c8ca42dd6e0abc84
91c9f54ce3788278de9274d15c8b189d195f6ee0
refs/heads/master
2020-03-23T16:31:36.838203
2018-07-21T13:29:35
2018-07-21T13:29:35
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5300084352493286, "alphanum_fraction": 0.7480980753898621, "avg_line_length": 28.209877014160156, "blob_id": "d69fe05f0844cfcfc797faada7a5b18863955866", "content_id": "6ea335808ecec726ea0311260e4b614ad12c2cfe", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2366, "license_type": "permissive", "max_line_length": 274, "num_lines": 81, "path": "/README.md", "repo_name": "mrhm2000/Self-Driving-Car-Semantic-Segmentation", "src_encoding": "UTF-8", "text": "# Semantic Segmentation\n### Goal\nThe project goal is to label road images using a Fully Convolutional Network based on a pre-trained VGG-16 image classifier. The model is trained and tested using the KITTI data set.\n\n### Environment\n\n#### GPU\nThe Python code 'main.py' checks GPU availability.\n\n#### Dependency\n - [Python 3](https://www.python.org/)\n - [TensorFlow](https://www.tensorflow.org/)\n - [NumPy](http://www.numpy.org/)\n - [SciPy](https://www.scipy.org/)\n\n\n#### Dataset\nDownload the [Kitti Road dataset](http://www.cvlibs.net/datasets/kitti/eval_road.php) from [here](http://www.cvlibs.net/download.php?file=data_road.zip). Extract the dataset in the `data` folder. This will create the folder `data_road` with all the training and test images.\n\n\n### Implementation\nThe pre-trained VGG-16 is converted to match the data set; in this case the model is set to identify two classes: road and non-road. The model loss is calculated using cross-entropy, and Adam is used as the optimizer.\n\n\n##### Run\nRun the following command to run the project:\n```\npython main.py\n```\n### Result\nResulting images from the trained model are saved in the images folder. Parameter adjustments and losses are recorded below.\n\n#### Parameter\n\nThe model was trained on various settings. 
I settled on 20 epochs, batch_size = 5, dropout 0.5, learning rate = 0.0001, kernel initializer weight = 0.01 and kernel regularizer weight 1e-3.\n\n#### Loss\nI experimented with a number of settings, and the average loss tends to get lower with each epoch iteration.\n\nNo|Epoch | Loss average | Dropout | Learning Rate\n---|---|---|---|---\n1|20|0.033|0.5|0.00009\n2|20|0.027|0.5|0.0001\n3|20|0.038|0.6|0.0001\n4|20|0.044|0.5|0.0002\n\n\nTraining loss record for the row 2 setting above:\n\nEpoch | Loss average\n---|---\n1|0.52895992968616812\n2|0.15678886208554793\n3|0.12183833970078106\n4|0.094745325695337917\n5|0.090101418181740001\n6|0.075108099279218701\n7|0.06586082992625647\n8|0.06244756740614258\n9|0.052465703370499203\n10|0.048280684887592136\n11|0.04552585021432104\n12|0.042489964804001926\n13|0.040817561944753958\n14|0.040140913767290523\n15|0.03672626222772845\n16|0.033920377685592092\n17|0.032669993567055668\n18|0.03021795659101215\n19|0.029408452486426664\n20|0.027688115144726532\n\n\n\n#### Images\n\n![image1](/images/um_000002.png)\n![image2](/images/um_000045.png)\n![image3](/images/um_000066.png)\n![image4](/images/uu_000038.png)\n![image5](/images/uu_000055.png)\n" }, { "alpha_fraction": 0.6587749719619751, "alphanum_fraction": 0.673436164855957, "avg_line_length": 38.51931381225586, "blob_id": "20fa11fb2f87c93a5b8e9d01efd639fe395794cf", "content_id": "b44b8859264b75df1769763e5999f4c07c4b9ec0", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9208, "license_type": "permissive", "max_line_length": 146, "num_lines": 233, "path": "/main.py", "repo_name": "mrhm2000/Self-Driving-Car-Semantic-Segmentation", "src_encoding": "UTF-8", "text": "import os.path\nimport tensorflow as tf\nimport helper\nimport warnings\nfrom distutils.version import LooseVersion\nimport project_tests as tests\n\n# -------------------------\n# User Data Constant\n# -------------------------\n\nnum_classes = 2\nimage_shape = (160, 576)\ndata_dir = './data'\nruns_dir = './runs'\nmodel_dir = './models'\n\nepochs = 20\nbatch_size = 5\nvalKeepProb = 0.5\nvalLearRate = 0.0001\n\n#Kernel Weight\nstdWeigth = 0.01\nregWeight = 1e-3\n\n#Total Loss logs\ntotalLossLog=[]\n\n# Check TensorFlow Version\nassert LooseVersion(tf.__version__) >= LooseVersion('1.0'), 'Please use TensorFlow version 1.0 or newer. You are using {}'.format(tf.__version__)\nprint('TensorFlow Version: {}'.format(tf.__version__))\n\n# Check for a GPU\nif not tf.test.gpu_device_name():\n    warnings.warn('No GPU found. 
Please use a GPU to train your neural network.')\nelse:\n print('Default GPU Device: {}'.format(tf.test.gpu_device_name()))\n\n\ndef load_vgg(sess, vgg_path):\n \"\"\"\n Load Pretrained VGG Model into TensorFlow.\n :param sess: TensorFlow Session\n :param vgg_path: Path to vgg folder, containing \"variables/\" and \"saved_model.pb\"\n :return: Tuple of Tensors from VGG model (image_input, keep_prob, layer3_out, layer4_out, layer7_out)\n \"\"\"\n # TODO: Implement function\n # Use tf.saved_model.loader.load to load the model and weights\n\n tf.saved_model.loader.load(sess, ['vgg16'], vgg_path)\n\n graph = tf.get_default_graph()\n input_layer = graph.get_tensor_by_name('image_input:0')\n keep_prob = graph.get_tensor_by_name('keep_prob:0')\n layer3 = graph.get_tensor_by_name('layer3_out:0')\n layer4 = graph.get_tensor_by_name('layer4_out:0')\n layer7 = graph.get_tensor_by_name('layer7_out:0')\n\n return input_layer, keep_prob, layer3, layer4, layer7\n\n\n\n\ndef layers(vgg_layer3_out, vgg_layer4_out, vgg_layer7_out, num_classes):\n \"\"\"\n Create the layers for a fully convolutional network. Build skip-layers using the vgg layers.\n :param vgg_layer7_out: TF Tensor for VGG Layer 3 output\n :param vgg_layer4_out: TF Tensor for VGG Layer 4 output\n :param vgg_layer3_out: TF Tensor for VGG Layer 7 output\n :param num_classes: Number of classes to classify\n :return: The Tensor for the last layer of output\n \"\"\"\n\n # TODO: Implement function\n # 1x1 convolution VGG layer 7\n conv1x1_7 = tf.layers.conv2d(vgg_layer7_out,num_classes,1,padding = 'same',\n\t\t\t\tkernel_initializer = tf.random_normal_initializer(stddev=stdWeigth),\n\t\t\t\tkernel_regularizer= tf.contrib.layers.l2_regularizer(regWeight),name='conv1x1_7')\n # Upsample\n firstUsample = tf.layers.conv2d_transpose(conv1x1_7,num_classes,4,strides= (2, 2),padding= 'same',\n\t\t\t\tkernel_initializer = tf.random_normal_initializer(stddev=stdWeigth),\n\t\t\t\tkernel_regularizer= tf.contrib.layers.l2_regularizer(regWeight),name='firstUsample')\n conv1x1_4 = tf.layers.conv2d(vgg_layer4_out,num_classes,1,padding = 'same',\n\t\t\t\tkernel_initializer = tf.random_normal_initializer(stddev=stdWeigth),\n\t\t\t\tkernel_regularizer= tf.contrib.layers.l2_regularizer(regWeight),name='conv1x1_4')\n # Skip layer\n firstSkip = tf.add(firstUsample, conv1x1_4, name='firstSkip')\n\n # Upsample\n secondUsample = tf.layers.conv2d_transpose(firstSkip,num_classes,4,strides= (2, 2),padding= 'same',\n\t\t\t\tkernel_initializer = tf.random_normal_initializer(stddev=stdWeigth),\n\t\t\t\tkernel_regularizer= tf.contrib.layers.l2_regularizer(regWeight),name='secondUsample')\n conv1x1_3 = tf.layers.conv2d(vgg_layer3_out,num_classes,1,padding = 'same',\n\t\t\t\tkernel_initializer = tf.random_normal_initializer(stddev=stdWeigth),\n\t\t\t\tkernel_regularizer= tf.contrib.layers.l2_regularizer(regWeight),name='conv1x1_3')\n\t# Skip layer\n secondSkip = tf.add(secondUsample, conv1x1_3, name='secondSkip')\n\n # Upsample\n nnFinal = tf.layers.conv2d_transpose(secondSkip, num_classes, 16,strides= (8, 8),padding= 'same',\n kernel_initializer = tf.random_normal_initializer(stddev=stdWeigth),\n kernel_regularizer= tf.contrib.layers.l2_regularizer(regWeight),name='nnFinal')\n\n return nnFinal\n\n\ndef optimize(nn_last_layer, correct_label, learning_rate, num_classes):\n \"\"\"\n Build the TensorFLow loss and optimizer operations.\n :param nn_last_layer: TF Tensor of the last layer in the neural network\n :param correct_label: TF Placeholder for the correct label image\n :param 
learning_rate: TF Placeholder for the learning rate\n :param num_classes: Number of classes to classify\n :return: Tuple of (logits, train_op, cross_entropy_loss)\n \"\"\"\n # TODO: Implement function\n # create logits : 2D tensor where each row represents a pixel and each column a class.\n logits = tf.reshape(nn_last_layer, (-1, num_classes))\n correct_label = tf.reshape(correct_label, (-1,num_classes))\n\n # create loss function.\n cross_entropy_loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits= logits, labels= correct_label))\n # Define optimizer. Adam in this case to have variable learning rate.\n optimizer = tf.train.AdamOptimizer(learning_rate= learning_rate)\n\n # Apply optimizer to the loss function.\n train_op = optimizer.minimize(cross_entropy_loss)\n\n return logits, train_op, cross_entropy_loss\n\n\n\ndef train_nn(sess, epochs, batch_size, get_batches_fn, train_op, cross_entropy_loss, input_image,\n correct_label, keep_prob, learning_rate):\n \"\"\"\n Train neural network and print out the loss during training.\n :param sess: TF Session\n :param epochs: Number of epochs\n :param batch_size: Batch size\n :param get_batches_fn: Function to get batches of training data. Call using get_batches_fn(batch_size)\n :param train_op: TF Operation to train the neural network\n :param cross_entropy_loss: TF Tensor for the amount of loss\n :param input_image: TF Placeholder for input images\n :param correct_label: TF Placeholder for label images\n :param keep_prob: TF Placeholder for dropout keep probability\n :param learning_rate: TF Placeholder for learning rate\n \"\"\"\n # TODO: Implement function\n sess.run(tf.global_variables_initializer())\n\n print('Training Start: {} epochs'.format(epochs))\n\n for epoch in range(epochs):\n itercount=0\n lossLog=[]\n print('Epoch : {}'.format(epoch + 1))\n for image, label in get_batches_fn(batch_size):\n _, loss = sess.run([train_op, cross_entropy_loss],\n feed_dict={\n input_image: image,\n correct_label: label,\n keep_prob: valKeepProb,\n learning_rate: valLearRate\n })\n itercount+=1\n print(\"--> Iteration: \", itercount, \" loss:\", loss)\n lossLog.append(loss)\n thislosses=sum(lossLog)/len(lossLog)\n print(\"Epoch: \", epoch + 1, \" --> loss average: \", thislosses)\n totalLossLog.append(thislosses)\n\n print('Training Done')\n\n\n\ndef run():\n\n tests.test_for_kitti_dataset(data_dir)\n\n # Download pretrained vgg model\n helper.maybe_download_pretrained_vgg(data_dir)\n\n # OPTIONAL: Train and Inference on the cityscapes dataset instead of the Kitti dataset.\n # You'll need a GPU with at least 10 teraFLOPS to train on.\n # https://www.cityscapes-dataset.com/\n\n with tf.Session() as sess:\n # Path to vgg model\n vgg_path = os.path.join(data_dir, 'vgg')\n # Create function to get batches\n get_batches_fn = helper.gen_batch_function(os.path.join(data_dir, 'data_road/training'), image_shape)\n\n # OPTIONAL: Augment Images for better results\n # https://datascience.stackexchange.com/questions/5224/how-to-prepare-augment-images-for-neural-network\n\n # TODO: Build NN using load_vgg, layers, and optimize function\n\n # Placeholders\n correct_label = tf.placeholder(tf.int32, [None, None, None, num_classes], name='correct_label')\n learning_rate = tf.placeholder(tf.float32, name='learning_rate')\n\n # Getting layers from vgg.\n input_image, keep_prob, layer3_out, layer4_out, layer7_out = load_vgg(sess, vgg_path)\n\n # Creating new layers.\n layer_output = layers(layer3_out, layer4_out, layer7_out, num_classes)\n\n # Creating loss 
and optimizer operations.\n logits, train_op, cross_entropy_loss = optimize(layer_output, correct_label, learning_rate, num_classes)\n\n # TODO: Train NN using the train_nn function\n\n saver = tf.train.Saver()\n\n train_nn(sess, epochs, batch_size, get_batches_fn, train_op, cross_entropy_loss, input_image,\n correct_label, keep_prob, learning_rate)\n\n # TODO: Save inference data using helper.save_inference_samples\n helper.save_inference_samples(model_dir, runs_dir, data_dir, sess, image_shape, logits, keep_prob, input_image, saver)\n\n # OPTIONAL: Apply the trained model to a video\n\ndef testRun():\n\ttests.test_load_vgg(load_vgg, tf)\n\ttests.test_layers(layers)\n\ttests.test_optimize(optimize)\n\ttests.test_train_nn(train_nn)\n\nif __name__ == '__main__':\n testRun()\n run()\n print(totalLossLog)\n" } ]
2
nyxojaele/Possibilities
https://github.com/nyxojaele/Possibilities
673390650e776f7052e3aed86e16cc8ad2d9f021
00c763d8dc53574439a66d98500121f7fe0e5f1e
f165f6f68f37265592c2045afef33c1d61a30ecb
refs/heads/master
2020-05-05T14:00:17.574067
2012-01-22T22:46:13
2012-01-22T22:46:13
3134039
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.6792738437652588, "alphanum_fraction": 0.6838123798370361, "avg_line_length": 19.387096405029297, "blob_id": "e61768b0879a29b57b64fa74fd0de6b70596bd1b", "content_id": "a5347c95a42b54a210cb37c40a595dc7060c4334", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "PHP", "length_bytes": 661, "license_type": "no_license", "max_line_length": 71, "num_lines": 31, "path": "/Kingdom/Cinder/Pilot/www/checkAccount.php", "repo_name": "nyxojaele/Possibilities", "src_encoding": "UTF-8", "text": "<?php\r\n//Functions\r\ninclude_once 'scripts/account_functions.php';\r\ninclude_once 'scripts/session_functions.php';\r\n//Startup\r\ninclude_once 'scripts/connect_to_mysql.php';\r\n\r\n$username = $_POST['username'];\r\n$success = 0;\r\n\r\n\r\n//Check DB Version\r\n$expectedDBVersion = $_POST['DBVersion'];\r\nif (!CheckDBVersion($expectedDBVersion))\r\n{\r\n\t//Invalid DB Version\r\n\tprint \"Success=$success&Error='Invalid DB version'\";\r\n\treturn;\r\n}\r\n\r\n\r\n$exists = AccountExists($username);\r\nif ($exists == -1)\r\n{\r\n\tprint \"Success=$success&Error='Unable to check account at this time'\";\r\n\treturn;\r\n}\r\n$success = 1;\r\nprint \"Success=$success&Result=$exists&Username=$username\";\r\nreturn;\r\n?>" }, { "alpha_fraction": 0.6174079179763794, "alphanum_fraction": 0.6217120885848999, "avg_line_length": 22.6235294342041, "blob_id": "a26ec78772a000b4058306e50bb05b0107dede39", "content_id": "a6d9fddc8147ec36ecd66856548b7d20cc953f89", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "PHP", "length_bytes": 2091, "license_type": "no_license", "max_line_length": 102, "num_lines": 85, "path": "/Kingdom/Cinder/Pilot/www/resources.php", "repo_name": "nyxojaele/Possibilities", "src_encoding": "UTF-8", "text": "<?php\r\n//Functions\r\ninclude_once 'scripts/session_functions.php';\r\n//Startup\r\ninclude_once 'scripts/connect_to_mysql.php';\r\n\r\n$requestID = $_POST['requestID'];\r\n$sessionID = $_POST['sessionID'];\r\n$action = $_POST['action'];\t\t\t//'get' or 'set'\r\n\r\nif (!TouchValidSession($sessionID))\r\n{\r\n print \"Action=$action&RequestID=$requestID&Success=0&Error='Session timed out.'\";\r\n\treturn;\r\n}\r\n$userID = GetUserForSession($sessionID);\r\nif (!$userID)\r\n{\r\n\tprint \"Action=$action&RequestID=$requestID&Success=0&Error='Unable to retrieve user for session.'\";\r\n\treturn;\r\n}\r\n\r\nif ($action == 'getall')\r\n{\r\n\t//Retrieve a string that represents all info about all resources for the user\r\n\t$resourceResults = mysql_query(\"\r\n\t\t\tSELECT\r\n\t\t\t\tp_ID,\r\n\t\t\t\tType,\r\n\t\t\t\tValue\r\n\t\t\tFROM\r\n\t\t\t\ttblResources\r\n\t\t\tWHERE\r\n\t\t\t\tUserID = $userID\r\n\t\t\t\");\r\n\tif (!$resourceResults)\r\n\t{\r\n\t\tprint \"Action=$action&RequestID=$requestID&Success=0&Error='Error in resource getall query.'\";\r\n\t\treturn;\r\n\t}\r\n\t$resourceString = \"\";\r\n\t$resourceCount = mysql_num_rows($resourceResults);\r\n\tif ($resourceCount != 0)\r\n\t{\r\n\t\t$resourceRow = mysql_fetch_assoc($resourceResults);\r\n\t\twhile ($resourceRow)\r\n\t\t{\r\n\t\t\tif ($resourceString != \"\")\r\n\t\t\t\t$resourceString .= \"|\";\r\n\t\t\t$resourceString .= $resourceRow['Type'] . \",\" . $resourceRow['p_ID'] . \",\" . 
$resourceRow['Value'];\r\n\t\t\t\r\n\t\t\t$resourceRow = mysql_fetch_assoc($resourceResults);\r\n\t\t}\r\n\t}\r\n\tprint \"Action=$action&RequestID=$requestID&Success=1&Result=$resourceString\";\r\n\treturn;\r\n}\r\nelse if ($action == 'set')\r\n{\r\n\t$ID = $_POST['id'];\t\t\t\t\t//p_ID\r\n\tif ($ID == '-1')\r\n\t{\r\n\t\tprint \"Action=$action&RequestID=$requestID&Success=0&Error='Invalid resource ID $ID'\";\r\n\t\treturn;\r\n\t}\r\n\t$type = $_POST['type'];\t\t\t\t//Type\r\n\t$value = $_POST['value'];\t\t\t//Value\r\n\tif (!mysql_query(\"\r\n\t\t\tUPDATE\r\n\t\t\t\ttblResources\r\n\t\t\tSET\r\n\t\t\t\tValue = $value\r\n\t\t\tWHERE\r\n\t\t\t\tp_ID = $ID\r\n\t\t\t\tAND Type = $type\r\n\t\t\t\tAND UserID = $userID\r\n\t\t\t\"))\r\n\t{\r\n\t\tprint \"Action=$action&RequestID=$requestID&Success=0&Error='Error in resource set query.'\";\r\n\t\treturn;\r\n\t}\r\n\tprint \"Action=$action&RequestID=$requestID&Success=1\";\r\n\treturn;\r\n}\r\n?>" }, { "alpha_fraction": 0.6146630644798279, "alphanum_fraction": 0.620093822479248, "avg_line_length": 21.982248306274414, "blob_id": "c8666eb563991867f93ecffca7af5271dfd24368", "content_id": "3e31e2cc55e466f01062d41e5781cfd50918a67b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "PHP", "length_bytes": 4051, "license_type": "no_license", "max_line_length": 175, "num_lines": 169, "path": "/Kingdom/Cinder/Pilot/www/buildings.php", "repo_name": "nyxojaele/Possibilities", "src_encoding": "UTF-8", "text": "<?php\r\n//Functions\r\ninclude_once 'scripts/session_functions.php';\r\n//Startup\r\ninclude_once 'scripts/connect_to_mysql.php';\r\n\r\n$requestID = $_POST['requestID'];\r\n$sessionID = $_POST['sessionID'];\r\n$action = $_POST['action'];\t\t\t//'place', 'remove', 'move', 'getall'\r\n\r\nif (!TouchValidSession($sessionID))\r\n{\r\n print \"Action=$action&RequestID=$requestID&Success=0&Error='Session timed out.'\";\r\n\treturn;\r\n}\r\n$userID = GetUserForSession($sessionID);\r\nif (!$userID)\r\n{\r\n\tprint \"Action=$action&RequestID=$requestID&Success=0&Error='Unable to retrieve user for session.'\";\r\n\treturn;\r\n}\r\n\r\nif ($action == 'getall')\r\n{\r\n\t//Retrieve a string that represents all info about all buildings for the user\r\n\t$buildingsResults = mysql_query(\"\r\n\t\tSELECT\r\n\t\t\tp_ID,\r\n\t\t\tClassTypeID,\r\n\t\t\tPosX,\r\n\t\t\tPosY,\r\n\t\t\tCurrentHealth\r\n\t\tFROM\r\n\t\t\ttblBuildings\r\n\t\tWHERE\r\n\t\t\tUserID = $userID\r\n\t\t\");\r\n\tif (!$buildingsResults)\r\n\t{\r\n\t\tprint \"Action=$action&RequestID=$requestID&Success=0&Error='Error in building getall query'\";\r\n\t\treturn;\r\n\t}\r\n\t$buildingString = \"\";\r\n\t$buildingCount = mysql_num_rows($buildingsResults);\r\n\tif ($buildingCount != 0)\r\n\t{\r\n\t\t$buildingRow = mysql_fetch_assoc($buildingsResults);\r\n\t\twhile ($buildingRow)\r\n\t\t{\r\n\t\t\tif ($buildingString != \"\")\r\n\t\t\t\t$buildingString .= \"|\";\r\n\t\t\t$buildingString .= $buildingRow['ClassTypeID'] . \",\" . $buildingRow['p_ID'] . \",\" . $buildingRow['PosX'] . \",\" . $buildingRow['PosY'] . \",\" . 
$buildingRow['CurrentHealth'];\r\n\t\t\t\r\n\t\t\t$buildingRow = mysql_fetch_assoc($buildingsResults);\r\n\t\t}\r\n\t}\r\n\tprint \"Action=$action&RequestID=$requestID&Success=1&Result=$buildingString\";\r\n\treturn;\r\n}\r\nelse if ($action == 'place')\r\n{\r\n\t$classTypeID = $_POST['classTypeID'];\t//Building class type identifier\r\n\t$toXPos = $_POST['toX'];\t\t\t\t//XPos to place at\r\n\t$toYPos = $_POST['toY'];\t\t\t\t//YPOs to place at\r\n\t$health = $_POST['health'];\t\t\t\t//Current Health\r\n\t\r\n\tif (!PlaceBuilding($userID, $classTypeID, $toXPos, $toYPos, $health))\r\n\t{\r\n\t\tprint \"Action=$action&RequestID=$requestID&Success=0&Error='Error in place query'\";\r\n\t\treturn;\r\n\t}\r\n\t$newID = mysql_insert_id();\r\n\tprint \"Action=$action&RequestID=$requestID&Success=1&NewID=$newID\";\r\n}\r\nelse\r\n{\r\n\t$id = $_POST['id'];\r\n\r\n\tif (!CheckBuildingOwnership($userID, $id))\r\n\t{\r\n\t\tprint \"Action=$action&RequestID=$requestID&Success=0&Error='Building doesn't belong to this user'\";\r\n\t\treturn;\r\n\t}\r\n\t\r\n\tif ($action == 'remove')\r\n\t{\r\n\t\t$id = $_POST['id'];\t\t\t\t\t//DB ID\r\n\t\tif (!RemoveBuilding($id))\r\n\t\t{\r\n\t\t\tprint \"Action=$action&RequestID=$requestID&Success=0&Error='Error in remove query'\";\r\n\t\t\treturn;\r\n\t\t}\r\n\t\tprint \"Action=$action&RequestID=$requestID&Success=1\";\r\n\t}\r\n\telse if ($action == 'move')\r\n\t{\r\n\t\t$id = $_POST['id'];\t\t\t\t\t//DB ID\r\n\t\t$toXPos = $_POST['toX'];\t\t\t//XPos to move to\r\n\t\t$toYPos = $_POST['toY'];\t\t\t//YPos to move to\r\n\t\t\r\n\t\tif (!MoveBuilding($id, $toXPos, $toYPos))\r\n\t\t{\r\n\t\t\tprint \"Action=$action&RequestID=$requestID&Success=0&Error='Error in move query'\";\r\n\t\t\treturn;\r\n\t\t}\r\n\t\tprint \"Action=$action&RequestID=$requestID&Success=1\";\r\n\t}\r\n}\r\nreturn;\r\n\r\n\r\nfunction CheckBuildingOwnership($userID, $id)\r\n{\r\n\t$checkResults = mysql_query(\"\r\n\t\t\tSELECT\r\n\t\t\t\tCOUNT(p_ID)\r\n\t\t\tFROM\r\n\t\t\t\ttblBuildings\r\n\t\t\tWHERE\r\n\t\t\t\tp_ID = $id\r\n\t\t\t\tAND UserID = $userID\r\n\t\t\t\");\r\n\tif (!$checkResults)\r\n\t\t//Error with query\r\n\t\treturn 0;\r\n\t$checkCount = mysql_num_rows($checkResults);\r\n\tif ($checkCount == 0)\r\n\t\treturn 0;\r\n\treturn 1;\r\n\t\r\n}\r\nfunction PlaceBuilding($userID, $classTypeID, $toXPos, $toYPos, $currentHealth)\r\n{\r\n\tif (!mysql_query(\"\r\n\t\t\tINSERT INTO\r\n\t\t\t\ttblBuildings (UserID, ClassTypeID, PosX, PosY, CurrentHealth)\r\n\t\t\tVALUES\r\n\t\t\t\t($userID, $classTypeID, $toXPos, $toYPos, $currentHealth)\r\n\t\t\t\"))\r\n\t\t//Error with query\r\n\t\treturn 0;\r\n\treturn 1;\r\n}\r\nfunction RemoveBuilding($id)\r\n{\r\n\tif (!mysql_query(\"\r\n\t\t\tDELETE FROM\r\n\t\t\t\ttblBuildings\r\n\t\t\tWHERE\r\n\t\t\t\tp_ID = $id\r\n\t\t\t\"))\r\n\t\treturn 0;\r\n\treturn 1;\r\n}\r\nfunction MoveBuilding($id, $toXPos, $toYPos)\r\n{\r\n\tif (!mysql_query(\"\r\n\t\t\tUPDATE\r\n\t\t\t\ttblBuildings\r\n\t\t\tSET\r\n\t\t\t\tPosX = $toXPos,\r\n\t\t\t\tPosY = $toYPos\r\n\t\t\tWHERE\r\n\t\t\t\tp_ID = $id\r\n\t\t\t\"))\r\n\t\treturn 0;\r\n\treturn 1;\r\n}\r\n?>" }, { "alpha_fraction": 0.6336134672164917, "alphanum_fraction": 0.6336134672164917, "avg_line_length": 26.428571701049805, "blob_id": "f2cd23662631c24548bf1cfd3c7517178cf5ddbd", "content_id": "2af6650b8848ddefd98e348cfc29ff6915af4199", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "PHP", "length_bytes": 595, "license_type": "no_license", "max_line_length": 72, "num_lines": 21, "path": 
"/Kingdom/Cinder/Pilot/www/scripts/connect_to_mysql.php", "repo_name": "nyxojaele/Possibilities", "src_encoding": "UTF-8", "text": "<?php\r\n// Place db host name. Sometimes \"localhost\" but\r\n// sometimes looks like this: >> ??mysql??.someserver.net\r\n$db_host = \"localhost\";\r\n$db_username = \"wonder\";\r\n$db_pass = \"Imagine1234!\";\r\n$db_name = \"playground\";\r\n// Run the connection here\r\n$dbConnection = mysql_connect(\"$db_host\", \"$db_username\", \"$db_pass\");\r\nif (!$dbConnection)\r\n{\r\n print \"Error='Cannot create MySQL Connection'\";\r\n return;\r\n}\r\nif (!mysql_select_db(\"$db_name\"))\r\n{\r\n print \"Error='Cannot select database'\";\r\n return;\r\n}\r\n// Now you can use the variable $dbConnection to connect in your queries\r\n?>" }, { "alpha_fraction": 0.6511392593383789, "alphanum_fraction": 0.6556962132453918, "avg_line_length": 20.213483810424805, "blob_id": "bb65523bd504a45e00b06c0e8ba685dd8ce53fa0", "content_id": "ba02bb0423637ef81cd24e350273d18ed66c38e9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "PHP", "length_bytes": 1975, "license_type": "no_license", "max_line_length": 96, "num_lines": 89, "path": "/Kingdom/Cinder/Pilot/www/login.php", "repo_name": "nyxojaele/Possibilities", "src_encoding": "UTF-8", "text": "<?php\r\n//Functions\r\ninclude_once 'scripts/session_functions.php';\r\n//Startup\r\ninclude_once 'scripts/connect_to_mysql.php';\r\n\r\n$username = $_POST['username'];\r\n$password = $_POST['password'];\r\n$success = 0;\r\n\r\n\r\n//Check DB Version\r\n$expectedDBVersion = $_POST['DBVersion'];\r\nif (!CheckDBVersion($expectedDBVersion))\r\n{\r\n\t//Invalid DB Version\r\n\tprint \"Success=$success&Error='Invalid DB version'\";\r\n\treturn;\r\n}\r\n\r\n\r\n//Check if the username exists yet\r\n$userExistsResults = mysql_query(\"\r\n SELECT\r\n COUNT(Username)\r\n FROM\r\n tblUsers\r\n WHERE\r\n Username = '$username'\r\n \");\r\nif (!$userExistsResults)\r\n{\r\n $error = mysql_error();\r\n print \"Success=$success&Error='$error'\";\r\n return;\r\n}\r\n$userCountRow = mysql_fetch_row($userExistsResults);\r\n$userCount = $userCountRow[0];\r\nif ($userCount == 0)\r\n{\r\n print \"Success=$success&Error='Username doesn't exist. 
Please register before logging in.'\";\r\n return;\r\n}\r\n\r\n\r\n//Check if the password is correct\r\n$passwordCorrectResults = mysql_query(\"\r\n\tSELECT\r\n\t\tp_ID\r\n\tFROM\r\n\t\ttblUsers\r\n\tWHERE\r\n\t\tUsername = '$username'\r\n\t\tAND Password = '$password'\r\n\tLIMIT 1\r\n\t\");\r\nif (!$passwordCorrectResults)\r\n{\r\n print \"Success=$success&Error='Error checking password'\";\r\n return;\r\n}\r\n$userIDCount = mysql_num_rows($passwordCorrectResults);\r\nif ($userIDCount == 0)\r\n{\r\n print \"Success=$success&Error='Password incorrect'\";\r\n return;\r\n}\r\n$userIDRow = mysql_fetch_row($passwordCorrectResults);\r\n$userID = $userIDRow[0];\r\n\r\n$sessionID = GetExistingSession($userID);\r\nif ($sessionID == 0)\r\n\t$sessionID = CreateNewSession($userID);\r\nif ($sessionID == 0)\r\n{\r\n print \"Success=$success&Error='Could not acquire session'\";\r\n return;\r\n}\r\nif (!InitSession($userID, $sessionID))\r\n{\r\n\tprint \"Success=$success&Error='Could not init session'\";\r\n\treturn;\r\n}\r\n\r\n//We have a session, return it\r\nTouchSession($sessionID);\r\n$success = 1;\r\nprint \"Success=$success&SessionID=$sessionID&Username=$username\";\r\n?>" }, { "alpha_fraction": 0.7013888955116272, "alphanum_fraction": 0.7013888955116272, "avg_line_length": 27.799999237060547, "blob_id": "8caa4173237857518fb58788b5af7babc5f0c8fa", "content_id": "6a9af41975b0f4f74c1d08b93325070fd55f05d0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 144, "license_type": "no_license", "max_line_length": 56, "num_lines": 5, "path": "/cinder/views.py", "repo_name": "nyxojaele/Possibilities", "src_encoding": "UTF-8", "text": "from django.http import HttpResponse\n\ndef test(request):\n html = \"<html><body>Pieter is a poop</body></html>\" \n return HttpResponse(html)\n" }, { "alpha_fraction": 0.6096136569976807, "alphanum_fraction": 0.6239892244338989, "avg_line_length": 24.355030059814453, "blob_id": "0f423f1e031d0e7e159c96c34139b0c6b6155531", "content_id": "334485b97d5e2a3279cfcee9ff7a16393fc8b0d2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "PHP", "length_bytes": 4452, "license_type": "no_license", "max_line_length": 158, "num_lines": 169, "path": "/Kingdom/Cinder/Pilot/www/scripts/session_functions.php", "repo_name": "nyxojaele/Possibilities", "src_encoding": "UTF-8", "text": "<?php\r\n//Returns an existing, valid sessionID, or 0 if no such sessionID exists\r\nfunction GetExistingSession($userID)\r\n{\r\n $ret = 0;\r\n\r\n $sessionResults = mysql_query(\"\r\n SELECT\r\n s.p_ID\r\n FROM\r\n tblSessions s\r\n INNER JOIN tblUsers u ON u.LastSessionID = s.p_ID\r\n WHERE\r\n\t\t\tu.p_ID = $userID\r\n AND ADDTIME(s.TouchTime, '00:15:00.000000') > CURRENT_TIMESTAMP\r\n LIMIT 1\r\n \");\r\n\tif (!$sessionResults)\r\n\t\t//Error with query\r\n\t\treturn $ret;\r\n $sessionIDCount = mysql_num_rows($sessionResults);\r\n if ($sessionIDCount == 0)\r\n //First login, or last session's touch time is more than 15 mins ago\r\n return $ret;\r\n\r\n //If we got this far, we have an existing session\r\n $sessionIDRow = mysql_fetch_row($sessionResults);\r\n $ret = $sessionIDRow[0];\r\n return $ret;\r\n}\r\n\r\n//Returns the user associated with sessionID, if sessionID represents a valid session\r\nfunction GetUserForSession($sessionID)\r\n{\r\n\t$userResults = mysql_query(\"\r\n\t\tSELECT\r\n\t\t\tu.p_ID\r\n\t\tFROM\r\n\t\t\ttblSessions s\r\n\t\t\tINNER JOIN tblUsers u ON u.LastSessionID = 
s.p_ID\r\n\t\tWHERE\r\n\t\t\ts.p_ID = $sessionID\r\n\t\t\tAND ADDTIME(s.TouchTime, '00:15:00.000000') > CURRENT_TIMESTAMP\r\n\t\tLIMIT 1\r\n\t\t\");\r\n\tif (!$userResults)\r\n\t\t//Error with query\r\n\t\treturn 0;\r\n\t$userIDCount = mysql_num_rows($userResults);\r\n\tif ($userIDCount == 0)\r\n\t\t//This session's touch time is more than 15 mins ago\r\n\t\treturn 0;\r\n\t\r\n\t//If we got this far, the current session is valid, and we have a user for it\r\n\t$userIDRow = mysql_fetch_row($userResults);\r\n\treturn $userIDRow[0];\r\n}\r\n\r\n//Returns a newly created sessionID, or 0 if an error occurred\r\nfunction CreateNewSession($userID)\r\n{\r\n //New session\r\n $commit = \"COMMIT\";\r\n mysql_query(\"START TRANSACTION\");\r\n\r\n if (!mysql_query(\"\r\n INSERT INTO\r\n tblSessions (TouchTime)\r\n VALUES\r\n (CURRENT_TIMESTAMP)\r\n \"))\r\n $commit = \"ROLLBACK\";\r\n\r\n //Connect session to user\r\n $sessionID = mysql_insert_id();\r\n if (!mysql_query(\"\r\n UPDATE\r\n tblUsers\r\n SET\r\n LastSessionID = $sessionID\r\n WHERE\r\n p_ID = $userID\r\n \"))\r\n $commit = \"ROLLBACK\";\r\n\r\n mysql_query(\"$commit\");\r\n return $sessionID;\r\n}\r\n\r\n//Initializes a session before use- this is important to avoid race conditions and such while the user is playing\r\n//Returns success or not\r\nfunction InitSession($userID, $sessionID)\r\n{\r\n\t//Quests - Remove all requestIDs, as they all have p_IDs now, and the client isn't out of sync\r\n\tif (!mysql_query(\"\r\n\t\tUPDATE\r\n\t\t\ttblQuests\r\n\t\tSET\r\n\t\t\tRequestID = null\r\n\t\tWHERE\r\n\t\t\tUserID = $userID\r\n\t\t\"))\r\n\t\treturn 0;\r\n\treturn 1;\r\n}\r\n\r\n//This function should be called frequently in order to keep a session alive\r\nfunction TouchSession($sessionID)\r\n{\r\n mysql_query(\"\r\n UPDATE\r\n tblSessions\r\n SET\r\n TouchTime = CURRENT_TIMESTAMP\r\n WHERE\r\n p_ID = $sessionID\r\n \");\r\n}\r\n\r\n//This function should be called at the start of any calls to validate the requested session\r\nfunction TouchValidSession($sessionID)\r\n{\r\n\t$expectedDBVersion = $_POST['DBVersion'];\r\n\tif (!CheckDBVersion($expectedDBVersion))\r\n\t\t//Invalid DB Version\r\n\t\treturn 0;\r\n\t\r\n $sessionResults = mysql_query(\"\r\n SELECT\r\n p_ID\r\n FROM\r\n tblSessions\r\n WHERE\r\n\t\t\tp_ID = $sessionID\r\n\t\t\tAND ADDTIME(TouchTime, '00:15:00.000000') > CURRENT_TIMESTAMP\r\n LIMIT 1\r\n \");\r\n\tif (!$sessionResults)\r\n\t\t//Error with query\r\n\t\treturn 0;\r\n $sessionIDCount = mysql_num_rows($sessionResults);\r\n if ($sessionIDCount == 0)\r\n\t\t//No valid session\r\n\t\treturn 0;\r\n\t\r\n\t//If we got this far, there is a valid session\r\n\tTouchSession($sessionID);\r\n\treturn 1;\r\n}\r\n\r\n//This function should be called at the start of all server requests to make sure the DB version is in sync with the client\r\n//This is called already by TouchValidSession(), so all server requests automatically do this, but any other requests will need to manually call this function\r\nfunction CheckDBVersion($expectedDBVersion)\r\n{\r\n\t$dbVersionResults = mysql_query(\"\r\n\t\tSELECT\r\n\t\t\tDBVersion\r\n\t\tFROM\r\n\t\t\ttblConfiguration\r\n\t\t\");\r\n\tif (!$dbVersionResults || mysql_num_rows($dbVersionResults) != 1)\r\n\t\treturn 0;\r\n\t$dbVersionRow = mysql_fetch_row($dbVersionResults);\r\n\t$dbVersion = $dbVersionRow[0];\r\n\tif ($dbVersion != $expectedDBVersion)\r\n\t\treturn 0;\r\n\t\r\n\treturn 1;\r\n}\r\n?>" }, { "alpha_fraction": 0.6714285612106323, "alphanum_fraction": 0.6714285612106323, "avg_line_length": 22, "blob_id": 
"1408c16455b6f75b9f87263ca329c88891c47111", "content_id": "3a7d68144a02be7dd298f8ed5f265c0ee615dbb2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "PHP", "length_bytes": 70, "license_type": "no_license", "max_line_length": 59, "num_lines": 3, "path": "/Kingdom/Cinder/Pilot/www/index.php", "repo_name": "nyxojaele/Possibilities", "src_encoding": "UTF-8", "text": "<?php\r\nheader('Location: http://www.duuno.com/dreams/pilot.html');\r\n?>" }, { "alpha_fraction": 0.5990424752235413, "alphanum_fraction": 0.6199880242347717, "avg_line_length": 20.917808532714844, "blob_id": "68d042c14424175fd87ea4a56348fa33d4286bd9", "content_id": "452b9ae4a0ee8900996ae55a35d5f8aa9557ac17", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "PHP", "length_bytes": 1671, "license_type": "no_license", "max_line_length": 103, "num_lines": 73, "path": "/Kingdom/Cinder/Pilot/www/scripts/account_functions.php", "repo_name": "nyxojaele/Possibilities", "src_encoding": "UTF-8", "text": "<?php\r\n//Returns whether an account by the given name exists or not (1 or 0), or -1 for error\r\nfunction AccountExists($username)\r\n{\r\n\t//Check if the username exists yet\r\n\t$userExistsResults = mysql_query(\"\r\n\t\tSELECT\r\n\t\t\tCOUNT(Username)\r\n\t\tFROM\r\n\t\t\ttblUsers\r\n\t\tWHERE\r\n\t\t\tUsername = '$username'\r\n\t\t\");\r\n\tif (!$userExistsResults)\r\n\t\treturn -1;\r\n\t$userCountRow = mysql_fetch_row($userExistsResults);\r\n\t$userCount = $userCountRow[0];\r\n\tif ($userCount == 0)\r\n\t\treturn 0;\r\n\telse\r\n\t\treturn 1;\r\n}\r\n\r\nfunction CreateNewAccount($username, $password)\r\n{\r\n\t$commit = \"COMMIT\";\r\n\tmysql_query(\"START TRANSACTION\");\r\n\t\r\n\t//Create new user\r\n\tif (!mysql_query(\"\r\n\t\tINSERT INTO\r\n\t\t\ttblUsers (Username, Password)\r\n\t\tVALUES\r\n\t\t\t('$username', '$password')\r\n\t\t\"))\r\n\t\t$commit = \"ROLLBACK\";\r\n\t$userID = mysql_insert_id();\r\n\t\r\n\t//Fill default resources (order: wood, gold, food)\r\n\tif (!mysql_query(\"\r\n\t\tINSERT INTO\r\n\t\t\ttblResources (UserID, Type, Value)\r\n\t\tVALUES\r\n\t\t\t($userID, 1, 50),\r\n\t\t\t($userID, 2, 50),\r\n\t\t\t($userID, 3, 50)\r\n\t\t\"))\r\n\t\t$commit = \"ROLLBACK\";\r\n\t\r\n\t//Fill default minion\r\n\tif (!mysql_query(\"\r\n\t\tINSERT INTO\r\n\t\t\ttblMinions (UserID, Name, FighterStat, MageStat, GathererStat, BuilderStat, QuestID, RequestID, Sex)\r\n\t\tVALUES\r\n\t\t\t($userID, 'Minion', 1, 1, 1, 1, -1, null, 1)\r\n\t\t\"))\r\n\t\t$commit = \"ROLLBACK\";\r\n\t\r\n\t//Fill default quests (order: QUEST_RESOURCEWOOD1, QUEST_RESOURCEGOLD1, QUEST_RESOURCEFOOD1)\r\n\tif (!mysql_query(\"\r\n\t\tINSERT INTO\r\n\t\t\ttblQuests (UserID, QuestIndex, Type, State, RequestID)\r\n\t\tVALUES\r\n\t\t\t($userID, 0, 2, 1, null),\r\n\t\t\t($userID, 1, 2, 1, null),\r\n\t\t\t($userID, 2, 2, 1, null)\r\n\t\t\"))\r\n\t\t$commit = \"ROLLBACK\";\r\n\t\r\n\tmysql_query(\"$commit\");\r\n\treturn $userID;\r\n}\r\n?>" }, { "alpha_fraction": 0.5654187202453613, "alphanum_fraction": 0.5719671845436096, "avg_line_length": 18.15999984741211, "blob_id": "1b441abc75362c12de27baf30a84cf02c4333c7a", "content_id": "8a5ab89d29b953c2bc46c3ce5f676172c0900f5c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "PHP", "length_bytes": 15118, "license_type": "no_license", "max_line_length": 104, "num_lines": 750, "path": "/Kingdom/Cinder/Pilot/www/quests.php", "repo_name": "nyxojaele/Possibilities", "src_encoding": "UTF-8", "text": 
"<?php\r\n//Functions\r\ninclude_once 'scripts/session_functions.php';\r\ninclude_once 'scripts/transaction_functions.php';\r\n//Startup\r\ninclude_once 'scripts/connect_to_mysql.php';\r\n\r\n\r\n$requestID = $_POST['requestID'];\r\n$sessionID = $_POST['sessionID'];\r\n$action = $_POST['action'];\t\t\t//'getall', 'available', 'start', 'update', 'finish', 'reset'\r\n\r\n//Setup our transaction\r\nStartTransaction();\r\n\r\n//Print some stuff that's going back no matter what\r\nOutput(\"Action\", $action);\r\nOutput(\"RequestID\", $requestID);\r\n\r\n//On to the meat of the matter~\r\nif (!TouchValidSession($sessionID))\r\n{\r\n\tCompleteTransaction(\"Session timed out.\");\r\n\treturn;\r\n}\r\n$userID = GetUserForSession($sessionID);\r\nif (!$userID)\r\n{\r\n\tCompleteTransaction(\"Unable to retrieve user for session.\");\r\n\treturn;\r\n}\r\n\r\nif ($action == 'getall')\r\n{\r\n\t//Retrieve a string that represents all info about all quests for the user\r\n\t$questsResults = mysql_query(\"\r\n\t\tSELECT\r\n\t\t\tp_ID,\r\n\t\t\tQuestIndex,\r\n\t\t\tRequestID,\r\n\t\t\tType,\r\n\t\t\tState\r\n\t\tFROM\r\n\t\t\ttblQuests\r\n\t\tWHERE\r\n\t\t\tUserID = $userID\r\n\t\t\");\r\n\tif (!$questsResults)\r\n\t{\r\n\t\tCompleteTransaction(\"Error in quest getall query\");\r\n\t\treturn;\r\n\t}\r\n\t$questString = \"\";\r\n\t$questCount = mysql_num_rows($questsResults);\r\n\tif ($questCount != 0)\r\n\t{\r\n\t\t$questRow = mysql_fetch_assoc($questsResults);\r\n\t\twhile ($questRow)\r\n\t\t{\r\n\t\t\tif ($questString != \"\")\r\n\t\t\t\t$questString .= \"|\";\r\n\t\t\t$pid = $questRow['p_ID'];\r\n\t\t\t$questType = $questRow['Type'];\r\n\t\t\t$questString .= \"$questType,$pid,${questRow['QuestIndex']},${questRow['State']}\";\r\n\t\t\tswitch ($questType)\r\n\t\t\t{\r\n\t\t\t\tcase 1:\t//QUESTTYPE_REALTIME\r\n\t\t\t\t\t{\r\n\t\t\t\t\t\t$questResult = mysql_query(\"\r\n\t\t\t\t\t\t\tSELECT\r\n\t\t\t\t\t\t\t\tStartTime\r\n\t\t\t\t\t\t\tFROM\r\n\t\t\t\t\t\t\t\ttblRealtimeQuests\r\n\t\t\t\t\t\t\tWHERE\r\n\t\t\t\t\t\t\t\tp_ID = $pid\r\n\t\t\t\t\t\t\t\");\r\n\t\t\t\t\t\tif ($questResult &&\r\n\t\t\t\t\t\t\tmysql_num_rows($questResult) == 1)\r\n\t\t\t\t\t\t{\r\n\t\t\t\t\t\t\t$realtimeQuestRow = mysql_fetch_row($questResult);\r\n\t\t\t\t\t\t\t$questString .= \",$realtimeQuestRow[0]\";\r\n\t\t\t\t\t\t}\r\n\t\t\t\t\t\tbreak;\r\n\t\t\t\t\t}\r\n\t\t\t\tcase 2:\t//QUESTTYPE_GAMETIME\r\n\t\t\t\t\t{\r\n\t\t\t\t\t\t$questResult = mysql_query(\"\r\n\t\t\t\t\t\t\tSELECT\r\n\t\t\t\t\t\t\t\tTimeSoFarMs\r\n\t\t\t\t\t\t\tFROM\r\n\t\t\t\t\t\t\t\ttblGametimeQuests\r\n\t\t\t\t\t\t\tWHERE\r\n\t\t\t\t\t\t\t\tp_ID = $pid\r\n\t\t\t\t\t\t\t\");\r\n\t\t\t\t\t\tif ($questResult &&\r\n\t\t\t\t\t\t\tmysql_num_rows($questResult) == 1)\r\n\t\t\t\t\t\t{\r\n\t\t\t\t\t\t\t$gametimeQuestRow = mysql_fetch_row($questResult);\r\n\t\t\t\t\t\t\t$questString .= \",$gametimeQuestRow[0]\";\r\n\t\t\t\t\t\t}\r\n\t\t\t\t\t\tbreak;\r\n\t\t\t\t\t}\r\n\t\t\t\tcase 3:\t//QUESTTYPE_STEP\r\n\t\t\t\t\t{\r\n\t\t\t\t\t\t$questResult = mysql_query(\"\r\n\t\t\t\t\t\t\tSELECT\r\n\t\t\t\t\t\t\t\tCurrentSteps\r\n\t\t\t\t\t\t\tFROM\r\n\t\t\t\t\t\t\t\ttblStepQuests\r\n\t\t\t\t\t\t\tWHERE\r\n\t\t\t\t\t\t\t\tp_ID = $pid\r\n\t\t\t\t\t\t\t\");\r\n\t\t\t\t\t\tif ($questResult &&\r\n\t\t\t\t\t\t\tmysql_num_rows($questResult) == 1)\r\n\t\t\t\t\t\t{\r\n\t\t\t\t\t\t\t$stepQuestRow = mysql_fetch_row($questResult);\r\n\t\t\t\t\t\t\t$questString .= \",$stepQuestRow[0]\";\r\n\t\t\t\t\t\t}\r\n\t\t\t\t\t\tbreak;\r\n\t\t\t\t\t}\r\n\t\t\t}\r\n\t\t\t$questRow = 
mysql_fetch_assoc($questsResults);\r\n\t\t}\r\n\t}\r\n\tOutput(\"Result\", $questString);\r\n}\r\nelse if ($action == 'available')\r\n{\r\n\t$newID = ActivateQuest($userID, $requestID);\r\n\tif ($newID == -1)\r\n\t{\r\n\t\tCompleteTransaction(\"Can not activate quest\");\r\n\t\treturn;\r\n\t}\r\n\tOutput(\"NewID\", $newID);\r\n}\r\nelse if ($action == 'reset')\r\n{\r\n\t//We separate this from everything else because the rows may or may not exist already at this point\r\n\t$id = $_POST['id'];\r\n\tif (CheckQuestOwnership($userID, $id))\t//If the user doesn't own such a quest, it doesn't need resetting\r\n\t{\r\n\t\tif (!ResetQuest($userID, $id))\r\n\t\t{\r\n\t\t\tCompleteTransaction(\"Can not reset quest\");\r\n\t\t\treturn;\r\n\t\t}\r\n\t}\r\n}\r\nelse\r\n{\r\n\t$id = $_POST['id'];\r\n\tif (!CheckQuestOwnership($userID, $id))\r\n\t{\r\n\t\tCompleteTransaction(\"Quest doesn't belong to this user\");\r\n\t\treturn;\r\n\t}\r\n\t\r\n\tif ($action == 'start')\r\n\t{\r\n\t\tif (!StartQuest($userID, $id))\r\n\t\t{\r\n\t\t\tCompleteTransaction(\"Can not start quest\");\r\n\t\t\treturn;\r\n\t\t}\r\n\t}\r\n\telse if ($action == 'finish')\r\n\t{\r\n\t\tif (!FinishQuest($userID, $id))\r\n\t\t{\r\n\t\t\tCompleteTransaction(\"Can not finish quest\");\r\n\t\t\treturn;\r\n\t\t}\r\n\t}\r\n\telse if ($action == 'finishrepeatable')\r\n\t{\r\n\t\tif (!FinishRepeatableQuest($userID, $id))\r\n\t\t{\r\n\t\t\tCompleteTransaction(\"Can not finish repeatable quest\");\r\n\t\t\treturn;\r\n\t\t}\r\n\t}\r\n\telse if ($action == 'update')\r\n\t{\r\n\t\tif (!UpdateQuest($userID, $id))\r\n\t\t{\r\n\t\t\tCompleteTransaction(\"Can not update quest\");\r\n\t\t\treturn;\r\n\t\t}\r\n\t}\r\n}\r\nCompleteTransaction(null);\t//Success!\r\nreturn;\r\n\r\n\r\nfunction CheckQuestOwnership($userID, $id)\r\n{\r\n\t$checkResults = mysql_query(\"\r\n\t\tSELECT\r\n\t\t\tCOUNT(p_ID)\r\n\t\tFROM\r\n\t\t\ttblQuests\r\n\t\tWHERE\r\n\t\t\tp_ID = $id\r\n\t\t\tAND UserID = $userID\r\n\t\t\");\r\n\tif (!$checkResults)\r\n\t\t//Error with query\r\n\t\treturn 0;\r\n\t$checkRow = mysql_fetch_row($checkResults);\r\n\tif ($checkRow[0] == 0)\r\n\t\treturn 0;\r\n\treturn 1;\r\n}\r\n//Returns the p_ID of the newly created quest row, or -1 if it couldn't be created\r\nfunction ActivateQuest($userID, $requestID)\r\n{\r\n\t$type = $_POST['type'];\r\n\t$questIndex = $_POST['questIndex'];\r\n\t\r\n\t//Check if the quest has already been activated or not\r\n\t$questResults = mysql_query(\"\r\n\t\tSELECT\r\n\t\t\tp_ID\r\n\t\tFROM\r\n\t\t\ttblQuests\r\n\t\tWHERE\r\n\t\t\tUserID = $userID\r\n\t\t\tAND Type = $type\r\n\t\t\tAND QuestIndex = $questIndex\r\n\t\t\");\r\n\tif (!$questResults)\r\n\t\t//Error with query\r\n\t\treturn -1;\r\n\t$questCount = mysql_num_rows($questResults);\r\n\t\r\n\tmysql_query(\"START TRANSACTION\");\r\n\t$commit = true;\r\n\tif ($questCount != 0)\r\n\t{\r\n\t\t//This is a repeatable quest, and has been completed, so reset it\r\n\t\t$questRow = mysql_fetch_assoc($questResults);\r\n\t\t$thisQuestPid = $questRow['p_ID'];\r\n\t\t\r\n\t\tif (!mysql_query(\"\r\n\t\t\tDELETE FROM\r\n\t\t\t\ttblQuests\r\n\t\t\tWHERE\r\n\t\t\t\tp_ID = $thisQuestPid\r\n\t\t\t\"))\r\n\t\t\t$commit = false;\r\n\t\tswitch ($type)\r\n\t\t{\r\n\t\t\tcase 1:\t//QUESTTYPE_REALTIME\r\n\t\t\t\t{\r\n\t\t\t\t\tif (!mysql_query(\"\r\n\t\t\t\t\t\tDELETE FROM \r\n\t\t\t\t\t\t\ttblRealtimeQuests\r\n\t\t\t\t\t\tWHERE\r\n\t\t\t\t\t\t\tp_ID = $thisQuestPid\r\n\t\t\t\t\t\t\"))\r\n\t\t\t\t\t\t$commit = false;\r\n\t\t\t\t\tbreak;\r\n\t\t\t\t}\r\n\t\t\tcase 
2:\t//QUESTTYPE_GAMETIME\r\n\t\t\t\t{\r\n\t\t\t\t\tif (!mysql_query(\"\r\n\t\t\t\t\t\tDELETE FROM\r\n\t\t\t\t\t\t\ttblGametimeQuests\r\n\t\t\t\t\t\tWHERE\r\n\t\t\t\t\t\t\tp_ID = $thisQuestPid\r\n\t\t\t\t\t\t\"))\r\n\t\t\t\t\t\t$commit = false;\r\n\t\t\t\t\tbreak;\r\n\t\t\t\t}\r\n\t\t\tcase 3:\t//QUESTTYPE_STEP\r\n\t\t\t\t{\r\n\t\t\t\t\tif (!mysql_query(\"\r\n\t\t\t\t\t\tDELETE FROM\r\n\t\t\t\t\t\t\ttblStepQuests\r\n\t\t\t\t\t\tWHERE\r\n\t\t\t\t\t\t\tp_ID = $thisQuestPid\r\n\t\t\t\t\t\t\"))\r\n\t\t\t\t\t\t$commit = false;\r\n\t\t\t\t\tbreak;\r\n\t\t\t\t}\r\n\t\t}\r\n\t}\r\n\t\r\n\tif (!mysql_query(\"\r\n\t\tINSERT INTO\r\n\t\t\ttblQuests (UserID, QuestIndex, Type, State, RequestID)\r\n\t\tVALUES\r\n\t\t\t($userID, $questIndex, $type, 1, $requestID)\r\n\t\t\"))\r\n\t\t//Error with query\r\n\t\t$commit = false;\r\n\t$questPid = mysql_insert_id();\r\n\tswitch ($type)\r\n\t{\r\n\t\tcase 1:\t//QUESTTYPE_REALTIME\r\n\t\t\t{\r\n\t\t\t\tif (!mysql_query(\"\r\n\t\t\t\t\tINSERT INTO\r\n\t\t\t\t\t\ttblRealtimeQuests (p_ID, StartTime)\r\n\t\t\t\t\tVALUES\r\n\t\t\t\t\t\t($questPid, 0)\r\n\t\t\t\t\t\"))\r\n\t\t\t\t\t$commit = false;\r\n\t\t\t\tbreak;\r\n\t\t\t}\r\n\t\tcase 2:\t//QUESTTYPE_GAMETIME\r\n\t\t\t{\r\n\t\t\t\tif (!mysql_query(\"\r\n\t\t\t\t\tINSERT INTO\r\n\t\t\t\t\t\ttblGametimeQuests (p_ID, TimeSoFarMs)\r\n\t\t\t\t\tVALUES\r\n\t\t\t\t\t\t($questPid, 0)\r\n\t\t\t\t\t\"))\r\n\t\t\t\t\t$commit = false;\r\n\t\t\t\tbreak;\r\n\t\t\t}\r\n\t\tcase 3:\t//QUESTTYPE_STEP\r\n\t\t\t{\r\n\t\t\t\tif (!mysql_query(\"\r\n\t\t\t\t\tINSERT INTO\r\n\t\t\t\t\t\ttblStepQuests (p_ID, CurrentSteps)\r\n\t\t\t\t\tVALUES\r\n\t\t\t\t\t\t($questPid, 0)\r\n\t\t\t\t\t\"))\r\n\t\t\t\t\t$commit = false;\r\n\t\t\t\tbreak;\r\n\t\t\t}\r\n\t}\r\n\t\r\n\tif (!$commit)\r\n\t{\r\n\t\tmysql_query(\"ROLLBACK\");\r\n\t\treturn -1;\r\n\t}\r\n\tmysql_query(\"COMMIT\");\r\n\treturn $questPid;\r\n}\r\nfunction StartQuest($userID, $ID)\r\n{\r\n\t$actualQuestID = GetActualQuestID($userID, $ID);\r\n\tif ($actualQuestID < 0)\r\n\t\treturn 0;\r\n\t$questType = GetQuestType($actualQuestID);\r\n\tif ($questType == -1)\r\n\t\treturn 0;\r\n\t\r\n\tmysql_query(\"START TRANSACTION\");\r\n\t$commit = true;\r\n\r\n\t//Update generic quest state\r\n\t$questUpdateResults = mysql_query(\"\r\n\t\tUPDATE\r\n\t\t\ttblQuests\r\n\t\tSET\r\n\t\t\tState = 2\r\n\t\tWHERE\r\n\t\t\tp_ID = $actualQuestID\r\n\t\t\");\r\n\tif (!$questUpdateResults)\r\n\t\t$commit = false;\r\n\t\r\n\t//Update specific quest state\r\n\tswitch ($questType)\r\n\t{\r\n\t\tcase 1:\t//QUESTTYPE_REALTIME\r\n\t\t\t{\r\n\t\t\t\t$startTime = $_POST['startTime'];\r\n\t\t\t\t$realtimeUpdateResults = mysql_query(\"\r\n\t\t\t\t\tUPDATE\r\n\t\t\t\t\t\ttblRealtimeQuests\r\n\t\t\t\t\tSET\r\n\t\t\t\t\t\tStartTime = $startTime\r\n\t\t\t\t\tWHERE\r\n\t\t\t\t\t\tp_ID = $actualQuestID\r\n\t\t\t\t\t\");\r\n\t\t\t\tif (!$realtimeUpdateResults)\r\n\t\t\t\t\t$commit = false;\r\n\t\t\t\tbreak;\r\n\t\t\t}\r\n\t\tcase 2:\t//QUESTTYPE_GAMETIME\r\n\t\t\t{\r\n\t\t\t\t$timeSoFarMs = $_POST['timeSoFarMs'];\r\n\t\t\t\t$gametimeUpdateResults = mysql_query(\"\r\n\t\t\t\t\tUPDATE\r\n\t\t\t\t\t\ttblGametimeQuests\r\n\t\t\t\t\tSET\r\n\t\t\t\t\t\tTimeSoFarMs = $timeSoFarMs\r\n\t\t\t\t\tWHERE\r\n\t\t\t\t\t\tp_ID = $actualQuestID\r\n\t\t\t\t\t\");\r\n\t\t\t\tif (!$gametimeUpdateResults)\r\n\t\t\t\t\t$commit = false;\r\n\t\t\t\tbreak;\r\n\t\t\t}\r\n\t\tcase 3:\t//QUESTTYPE_STEP\r\n\t\t\t{\r\n\t\t\t\t$currentSteps = $_POST['currentSteps'];\r\n\t\t\t\t$stepUpdateResults = 
mysql_query(\"\r\n\t\t\t\t\tUPDATE\r\n\t\t\t\t\t\ttblStepQuests\r\n\t\t\t\t\tSET\r\n\t\t\t\t\t\tCurrentSteps = $currentSteps\r\n\t\t\t\t\tWHERE\r\n\t\t\t\t\t\tp_ID = $actualQuestID\r\n\t\t\t\t\t\");\r\n\t\t\t\tif (!stepUpdateResults)\r\n\t\t\t\t\t$commit = false;\r\n\t\t\t\tbreak;\r\n\t\t\t}\r\n\t}\r\n\tif (!$commit)\r\n\t{\r\n\t\tmysql_query(\"ROLLBACK\");\r\n\t\treturn 0;\r\n\t}\r\n\t\r\n\tmysql_query(\"COMMIT\");\r\n\treturn 1;\r\n}\r\nfunction UpdateQuest($userID, $ID)\r\n{\r\n\t$actualQuestID = GetActualQuestID($userID, $ID);\r\n\tif ($actualQuestID < 0)\r\n\t\treturn 0;\r\n\t$questType = GetQuestType($actualQuestID);\r\n\tif ($questType == -1)\r\n\t\treturn 0;\r\n\t\r\n\t//Update specific quest state\r\n\tswitch ($questType)\r\n\t{\r\n\t\tcase 1:\t//QUESTTYPE_REALTIME\r\n\t\t\t{\r\n\t\t\t\t//Nothing to do to update this type- this should never be called\r\n\t\t\t\tbreak;\r\n\t\t\t}\r\n\t\tcase 2:\t//QUESTTYPE_GAMETIME\r\n\t\t\t{\r\n\t\t\t\t//Just update the elapsed time to whatever the full time is\r\n\t\t\t\t$timeSoFarMs = $_POST['timeSoFarMs'];\r\n\t\t\t\tif (!mysql_query(\"\r\n\t\t\t\t\tUPDATE\r\n\t\t\t\t\t\ttblGametimeQuests\r\n\t\t\t\t\tSET\r\n\t\t\t\t\t\tTimeSoFarMs = $timeSoFarMs\r\n\t\t\t\t\tWHERE\r\n\t\t\t\t\t\tp_ID = $actualQuestID\r\n\t\t\t\t\t\"))\r\n\t\t\t\t\treturn 0;\r\n\t\t\t\tbreak;\r\n\t\t\t}\r\n\t\tcase 3:\t//QUESTTYPE_STEP\r\n\t\t\t{\r\n\t\t\t\t//Just update the current steps to whatever the full amount is\r\n\t\t\t\t$currentSteps = $_POST['currentSteps'];\r\n\t\t\t\tif (!mysql_query(\"\r\n\t\t\t\t\tUPDATE\r\n\t\t\t\t\t\ttblStepQuests\r\n\t\t\t\t\tSET\r\n\t\t\t\t\t\tCurrentSteps = $currentSteps\r\n\t\t\t\t\tWHERE\r\n\t\t\t\t\t\tp_ID = $actualQuestID\r\n\t\t\t\t\t\"))\r\n\t\t\t\t\treturn 0;\r\n\t\t\t\tbreak;\r\n\t\t\t}\r\n\t}\r\n\treturn 1;\r\n}\r\nfunction FinishQuest($userID, $ID)\r\n{\r\n\t$actualQuestID = GetActualQuestID($userID, $ID);\r\n\tif ($actualQuestID < 0)\r\n\t\treturn 0;\r\n\t$questType = GetQuestType($actualQuestID);\r\n\tif ($questType == -1)\r\n\t\treturn 0;\r\n\t\r\n\tmysql_query(\"START TRANSACTION\");\r\n\t$commit = true;\r\n\t\r\n\t//Update generic quest state\r\n\t$questUpdateResults = mysql_query(\"\r\n\t\tUPDATE\r\n\t\t\ttblQuests\r\n\t\tSET\r\n\t\t\tState = 3\r\n\t\tWHERE\r\n\t\t\tp_ID = $actualQuestID\r\n\t\t\");\r\n\tif (!$questUpdateResults)\r\n\t\t$commit = false;\r\n\t\r\n\t//Update specific quest state\r\n\tswitch ($questType)\r\n\t{\r\n\t\tcase 1:\t//QUESTTYPE_REALTIME\r\n\t\t\t{\r\n\t\t\t\t//Nothing to do to finish this type\r\n\t\t\t\tbreak;\r\n\t\t\t}\r\n\t\tcase 2:\t//QUESTTYPE_GAMETIME\r\n\t\t\t{\r\n\t\t\t\t//Just update the elapsed time to whatever the full time is\r\n\t\t\t\t$timeSoFarMs = $_POST['timeSoFarMs'];\r\n\t\t\t\tif (!mysql_query(\"\r\n\t\t\t\t\tUPDATE\r\n\t\t\t\t\t\ttblGametimeQuests\r\n\t\t\t\t\tSET\r\n\t\t\t\t\t\tTimeSoFarMs = $timeSoFarMs\r\n\t\t\t\t\tWHERE\r\n\t\t\t\t\t\tp_ID = $actualQuestID\r\n\t\t\t\t\t\"))\r\n\t\t\t\t\t$commit = false;\r\n\t\t\t\tbreak;\r\n\t\t\t}\r\n\t\tcase 3:\t//QUESTTYPE_STEP\r\n\t\t\t{\r\n\t\t\t\t//Just update the current steps to whatever the full amount is\r\n\t\t\t\t$currentSteps = $_POST['currentSteps'];\r\n\t\t\t\tif (!mysql_query(\"\r\n\t\t\t\t\tUPDATE\r\n\t\t\t\t\t\ttblStepQuests\r\n\t\t\t\t\tSET\r\n\t\t\t\t\t\tCurrentSteps = $currentSteps\r\n\t\t\t\t\tWHERE\r\n\t\t\t\t\t\tp_ID = $actualQuestID\r\n\t\t\t\t\t\"))\r\n\t\t\t\t\t$commit = false;\r\n\t\t\t\tbreak;\r\n\t\t\t}\r\n\t}\r\n\tif (!$commit)\r\n\t{\r\n\t\tmysql_query(\"ROLLBACK\");\r\n\t\treturn 
0;\r\n\t}\r\n\t\r\n\tmysql_query(\"COMMIT\");\r\n\treturn 1;\r\n}\r\nfunction FinishRepeatableQuest($userID, $ID)\r\n{\r\n\t$actualQuestID = GetActualQuestID($userID, $ID);\r\n\tif ($actualQuestID < 0)\r\n\t\treturn 0;\r\n\t$questType = GetQuestType($actualQuestID);\r\n\tif ($questType == -1)\r\n\t\treturn 0;\r\n\t\r\n\tmysql_query(\"START TRANSACTION\");\r\n\t$commit = true;\r\n\t\r\n\t//Update generic quest state\r\n\t$questUpdateResults = mysql_query(\"\r\n\t\tUPDATE\r\n\t\t\ttblQuests\r\n\t\tSET\r\n\t\t\tState = 1\r\n\t\tWHERE\r\n\t\t\tp_ID = $actualQuestID\r\n\t\t\");\r\n\tif (!$questUpdateResults)\r\n\t\t$commit = false;\r\n\t\r\n\t//Update specific quest state\r\n\tswitch ($questType)\r\n\t{\r\n\t\tcase 1:\t//QUESTTYPE_REALTIME\r\n\t\t\t{\r\n\t\t\t\t//Just reset the start time\r\n\t\t\t\tif (!mysql_query(\"\r\n\t\t\t\t\tUPDATE\r\n\t\t\t\t\t\ttblRealtimeQuests\r\n\t\t\t\t\tSET\r\n\t\t\t\t\t\tStartTime = 0\r\n\t\t\t\t\tWHERE\r\n\t\t\t\t\t\tp_ID = $actualQuestID\r\n\t\t\t\t\t\"))\r\n\t\t\t\t\t$commit = false;\r\n\t\t\t\tbreak;\r\n\t\t\t}\r\n\t\tcase 2:\t//QUESTTYPE_GAMETIME\r\n\t\t\t{\r\n\t\t\t\t//Just reset the elapsed time\r\n\t\t\t\tif (!mysql_query(\"\r\n\t\t\t\t\tUPDATE\r\n\t\t\t\t\t\ttblGametimeQuests\r\n\t\t\t\t\tSET\r\n\t\t\t\t\t\tTimeSoFarMs = 0\r\n\t\t\t\t\tWHERE\r\n\t\t\t\t\t\tp_ID = $actualQuestID\r\n\t\t\t\t\t\"))\r\n\t\t\t\t\t$commit = false;\r\n\t\t\t\tbreak;\r\n\t\t\t}\r\n\t\tcase 3:\t//QUESTTYPE_STEP\r\n\t\t\t{\r\n\t\t\t\t//Just reset the current steps\r\n\t\t\t\tif (!mysql_query(\"\r\n\t\t\t\t\tUPDATE\r\n\t\t\t\t\t\ttblStepQuests\r\n\t\t\t\t\tSET\r\n\t\t\t\t\t\tCurrentSteps = 0\r\n\t\t\t\t\tWHERE\r\n\t\t\t\t\t\tp_ID = $actualQuestID\r\n\t\t\t\t\t\"))\r\n\t\t\t\t\t$commit = false;\r\n\t\t\t\tbreak;\r\n\t\t\t}\r\n\t}\r\n\tif (!$commit)\r\n\t{\r\n\t\tmysql_query(\"ROLLBACK\");\r\n\t\treturn 0;\r\n\t}\r\n\t\r\n\tmysql_query(\"COMMIT\");\r\n\treturn 1;\r\n}\r\nfunction ResetQuest($userID, $ID)\r\n{\r\n\t$actualQuestID = GetActualQuestID($userID, $ID);\r\n\tif ($actualQuestID < 0)\r\n\t\treturn 0;\r\n\t$questType = GetQuestType($actualQuestID);\r\n\tif ($questType == -1)\r\n\t\treturn 0;\r\n\t\t\r\n\t//Remove specific quest data first because of foreign key constraints\r\n\tswitch ($questType)\r\n\t{\r\n\t\tcase 1:\t//QUESTTYPE_REALTIME\r\n\t\t\t{\r\n\t\t\t\tif (!mysql_query(\"\r\n\t\t\t\t\tDELETE FROM\r\n\t\t\t\t\t\ttblRealtimeQuests\r\n\t\t\t\t\tWHERE\r\n\t\t\t\t\t\tp_ID = $actualQuestID\r\n\t\t\t\t\t\"))\r\n\t\t\t\t\treturn 0;\r\n\t\t\t\tbreak;\r\n\t\t\t}\r\n\t\tcase 2:\t//QUESTTYPE_GAMETIME\r\n\t\t\t{\r\n\t\t\t\tif (!mysql_query(\"\r\n\t\t\t\t\tDELETE FROM\r\n\t\t\t\t\t\ttblGametimeQuests\r\n\t\t\t\t\tWHERE\r\n\t\t\t\t\t\tp_ID = $actualQuestID\r\n\t\t\t\t\t\"))\r\n\t\t\t\t\treturn 0;\r\n\t\t\t\tbreak;\r\n\t\t\t}\r\n\t\tcase 3:\t//QUESTTYPE_STEP\r\n\t\t\t{\r\n\t\t\t\tif (!mysql_query(\"\r\n\t\t\t\t\tDELETE FROM\r\n\t\t\t\t\t\ttblStepQuests\r\n\t\t\t\t\tWHERE\r\n\t\t\t\t\t\tp_ID = $actualQuestID\r\n\t\t\t\t\t\"))\r\n\t\t\t\t\treturn 0;\r\n\t\t\t\tbreak;\r\n\t\t\t}\r\n\t}\r\n\t\r\n\t//Remove base quest data\r\n\tif (!mysql_query(\"\r\n\t\tDELETE FROM\r\n\t\t\ttblQuests\r\n\t\tWHERE\r\n\t\t\tp_ID = $actualQuestID\r\n\t\t\"))\r\n\t\treturn 0;\r\n\t\t\r\n\treturn 1;\r\n}\r\n\r\n\r\n//Returns the p_ID of the quest in question, or -1 if no such quest exists\r\nfunction GetActualQuestID($userID, $ID)\r\n{\r\n\t//$ID may be either tblQuests.p_ID, or tblQuests.requestID\r\n\t//If $ID is negative, then the absolute value of it is tblQuests.requestID,\r\n\t//whereas if it is 
positive, it is tblQuests.p_ID\r\n\tif ($ID < 0)\r\n\t{\r\n\t\t//RequestID\r\n\t\t$questResults = mysql_query(\"\r\n\t\t\tSELECT\r\n\t\t\t\tp_ID\r\n\t\t\tFROM\r\n\t\t\t\ttblQuests\r\n\t\t\tWHERE\r\n\t\t\t\tUserID = $userID\r\n\t\t\t\tAND requestID = -$ID\r\n\t\t\t\");\r\n\t\tif (!$questResults)\r\n\t\t\treturn -1;\r\n\t\tif (mysql_num_rows($questResults) != 1)\r\n\t\t\treturn -1;\r\n\t\t$row = mysql_fetch_row($questResults);\r\n\t\treturn $row[0];\r\n\t}\r\n\telse\r\n\t{\r\n\t\t//p_ID\r\n\t\t$questResults = mysql_query(\"\r\n\t\t\tSELECT\r\n\t\t\t\tp_ID\r\n\t\t\tFROM\r\n\t\t\t\ttblQuests\r\n\t\t\tWHERE\r\n\t\t\t\tUserID = $userID\r\n\t\t\t\tAND p_ID = $ID\r\n\t\t\t\");\r\n\t\tif (!$questResults)\r\n\t\t\treturn -1;\r\n\t\tif (mysql_num_rows($questResults) != 1)\r\n\t\t\treturn -1;\r\n\t\t$row = mysql_fetch_row($questResults);\r\n\t\treturn $row[0];\r\n\t}\r\n}\r\n//Returns the Type column for the given p_ID, or -1 if an error occurred\r\nfunction GetQuestType($ID)\r\n{\r\n\t$questResults = mysql_query(\"\r\n\t\tSELECT\r\n\t\t\tType\r\n\t\tFROM\r\n\t\t\ttblQuests\r\n\t\tWHERE\r\n\t\t\tp_ID = $ID\r\n\t\t\");\r\n\tif (!$questResults)\r\n\t\treturn -1;\r\n\tif (mysql_num_rows($questResults) != 1)\r\n\t\treturn -1;\r\n\t$row = mysql_fetch_row($questResults);\r\n\treturn $row[0];\r\n}\r\n?>" }, { "alpha_fraction": 0.608989953994751, "alphanum_fraction": 0.6145435571670532, "avg_line_length": 20.750944137573242, "blob_id": "121e5b3d6288ba30e45d9762212de483bc8b276b", "content_id": "1832a691cb6896468b087be40991999886673301", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "PHP", "length_bytes": 5762, "license_type": "no_license", "max_line_length": 249, "num_lines": 265, "path": "/Kingdom/Cinder/Pilot/www/minions.php", "repo_name": "nyxojaele/Possibilities", "src_encoding": "UTF-8", "text": "<?php\r\n//Functions\r\ninclude_once 'scripts/session_functions.php';\r\ninclude_once 'scripts/transaction_functions.php';\r\n//Startup\r\ninclude_once 'scripts/connect_to_mysql.php';\r\n\r\n$requestID = $_POST['requestID'];\r\n$sessionID = $_POST['sessionID'];\r\n$action = $_POST['action'];\t\t\t//'add', 'remove', 'setquest', 'setStats', 'getall'\r\n\r\n//Setup our transaction\r\nStartTransaction();\r\n\r\n//Print some stuff that's going back no matter what\r\nOutput(\"Action\", $action);\r\nOutput(\"RequestID\", $requestID);\r\n\r\n//On to the meat of the matter~\r\nif (!TouchValidSession($sessionID))\r\n{\r\n\tCompleteTransaction(\"Session timed out.\");\r\n\treturn;\r\n}\r\n$userID = GetUserForSession($sessionID);\r\nif (!$userID)\r\n{\r\n\tCompleteTransaction(\"Unable to retrieve user for session.\");\r\n\treturn;\r\n}\r\n\r\nif ($action == 'getall')\r\n{\r\n\t//Retrieve a string that represents all info about all minions for the user\r\n\t$minionsResults = mysql_query(\"\r\n\t\tSELECT\r\n\t\t\tp_ID,\r\n\t\t\tRequestID,\r\n\t\t\tName,\r\n\t\t\tSex,\r\n\t\t\tFighterStat,\r\n\t\t\tMageStat,\r\n\t\t\tGathererStat,\r\n\t\t\tBuilderStat,\r\n\t\t\tQuestID\r\n\t\tFROM\r\n\t\t\ttblMinions\r\n\t\tWHERE\r\n\t\t\tUserID = $userID\r\n\t\t\");\r\n\tif (!$minionsResults)\r\n\t{\r\n\t\tCompleteTransaction(\"Error in minion getall query\");\r\n\t\treturn;\r\n\t}\r\n\t$minionString = \"\";\r\n\t$minionCount = mysql_num_rows($minionsResults);\r\n\tif ($minionCount != 0)\r\n\t{\r\n\t\t$minionRow = mysql_fetch_assoc($minionsResults);\r\n\t\twhile ($minionRow)\r\n\t\t{\r\n\t\t\tif ($minionString != \"\")\r\n\t\t\t\t$minionString .= \"|\";\r\n\t\t\t$pid = $minionRow['p_ID'];\r\n\t\t\t$minionString .= \"1,\" . 
$pid . \",\" . $minionRow['Name'] . \",\" . $minionRow['Sex'] . \",\" . $minionRow['FighterStat'] . \",\" . $minionRow['MageStat'] . \",\" . $minionRow['GathererStat'] . \",\" . $minionRow['BuilderStat'] . \",\" . $minionRow['QuestID'];\r\n\t\t\t\r\n\t\t\t$minionRow = mysql_fetch_assoc($minionsResults);\r\n\t\t}\r\n\t}\r\n\tOutput(\"Result\", $minionString);\r\n}\r\nelse if ($action == 'add')\r\n{\r\n\t$newID = AddMinion($userID, $requestID);\r\n\tif ($newID == -1)\r\n\t{\r\n\t\tCompleteTransaction(\"Can not add minion\");\r\n\t\treturn;\r\n\t}\r\n\tOutput(\"NewID\", $newID);\r\n}\r\nelse\r\n{\r\n\t$id = $_POST['id'];\r\n\tif (!CheckMinionOwnership($userID, $id))\r\n\t{\r\n\t\tCompleteTransaction(\"Minion doesn't belong to this user\");\r\n\t\treturn;\r\n\t}\r\n\t\r\n\tif ($action == 'remove')\r\n\t{\r\n\t\tif (!RemoveMinion($userID, $id))\r\n\t\t{\r\n\t\t\tCompleteTransaction(\"Can not remove minion\");\r\n\t\t\treturn;\r\n\t\t}\r\n\t}\r\n\telse if ($action == 'setquest')\r\n\t{\r\n\t\tif (!SetQuest($userID, $id))\r\n\t\t{\r\n\t\t\tCompleteTransaction(\"Can not set minion quest\");\r\n\t\t\treturn;\r\n\t\t}\r\n\t}\r\n\telse if ($action == 'setStats')\r\n\t{\r\n\t\tif (!SetStats($userID, $id))\r\n\t\t{\r\n\t\t\tCompleteTransaction(\"Can not set minion stats\");\r\n\t\t\treturn;\r\n\t\t}\r\n\t}\r\n}\r\nCompleteTransaction(null);\t//Success!\r\nreturn;\r\n\r\n\r\nfunction CheckMinionOwnership($userID, $id)\r\n{\r\n\t$checkResults = mysql_query(\"\r\n\t\tSELECT\r\n\t\t\tCOUNT(p_ID)\r\n\t\tFROM\r\n\t\t\ttblMinions\r\n\t\tWHERE\r\n\t\t\tp_ID = $id\r\n\t\t\tAND UserID = $userID\r\n\t\t\");\r\n\tif (!$checkResults)\r\n\t\t//Error with query\r\n\t\treturn 0;\r\n\t$checkCount = mysql_num_rows($checkResults);\r\n\tif ($checkCount == 0)\r\n\t\treturn 0;\r\n\treturn 1;\r\n}\r\n//Returns the p_ID of the newly created minion row, or -1 if it couldn't be created\r\nfunction AddMinion($userID, $requestID)\r\n{\r\n\t$name = $_POST['name'];\r\n\t$sex = $_POST['sex'];\r\n\t$fighterStat = $_POST['fighterStat'];\r\n\t$mageStat = $_POST['mageStat'];\r\n\t$gathererStat = $_POST['gathererStat'];\r\n\t$builderStat = $_POST['builderStat'];\r\n\t\r\n\tif (!mysql_query(\"\r\n\t\tINSERT INTO\r\n\t\t\ttblMinions (UserID, Name, Sex, FighterStat, MageStat, GathererStat, BuilderStat, QuestID, RequestID)\r\n\t\tVALUES\r\n\t\t\t($userID, '$name', $sex, $fighterStat, $mageStat, $gathererStat, $builderStat, -1, $requestID)\r\n\t\t\"))\r\n\t\treturn -1;\r\n\t$minionPid = mysql_insert_id();\r\n\treturn $minionPid;\r\n}\r\nfunction RemoveMinion($userID, $ID)\r\n{\r\n\t$actualMinionID = GetActualMinionID($userID, $ID);\r\n\tif ($actualMinionID < 0)\r\n\t\treturn 0;\r\n\t\r\n\tif (!mysql_query(\"\r\n\t\tDELETE FROM\r\n\t\t\ttblMinions\r\n\t\tWHERE\r\n\t\t\tp_ID = $actualMinionID\r\n\t\t\"))\r\n\t\treturn 0;\r\n\t\t\r\n\treturn 1;\r\n}\r\nfunction SetQuest($userID, $ID)\r\n{\r\n\t$actualMinionID = GetActualMinionID($userID, $ID);\r\n\tif ($actualMinionID < 0)\r\n\t\treturn 0;\r\n\t\r\n\t$questID = $_POST['questId'];\r\n\tif (!mysql_query(\"\r\n\t\tUPDATE\r\n\t\t\ttblMinions\r\n\t\tSET\r\n\t\t\tQuestID = $questID\r\n\t\tWHERE\r\n\t\t\tp_ID = $actualMinionID\r\n\t\t\"))\r\n\t\treturn 0;\r\n\treturn 1;\r\n}\r\nfunction SetStats($userID, $ID)\r\n{\r\n\t$actualMinionID = GetActualMinionID($userID, $ID);\r\n\tif ($actualMinionID < 0)\r\n\t\treturn 0;\r\n\t\r\n\t$fighterStat = $_POST['fighterStat'];\r\n\t$mageStat = $_POST['mageStat'];\r\n\t$gathererStat = $_POST['gathererStat'];\r\n\t$builderStat = $_POST['builderStat'];\r\n\tif 
(!mysql_query(\"\r\n\t\tUPDATE\r\n\t\t\ttblMinions\r\n\t\tSET\r\n\t\t\tFighterStat = $fighterStat,\r\n\t\t\tMageStat = $mageStat,\r\n\t\t\tGathererStat = $gathererStat,\r\n\t\t\tBuilderStat = $builderStat\r\n\t\tWHERE\r\n\t\t\tp_ID = $actualMinionID\r\n\t\t\"))\r\n\t\treturn 0;\r\n\treturn 1;\r\n}\r\n\r\n\r\n//Returns the p_ID of the minion in question, or -1 if no such quest exists\r\nfunction GetActualMinionID($userID, $ID)\r\n{\r\n\t//$ID may be either tblMinions.p_ID, or tblMinions.requestID\r\n\t//If $ID is negative, then the absolute value of it is tblMinions.requestID,\r\n\t//whereas if it is positive, it is tblMinions.p_ID\r\n\tif ($ID < 0)\r\n\t{\r\n\t\t//RequestID\r\n\t\t$minionResults = mysql_query(\"\r\n\t\t\tSELECT\r\n\t\t\t\tp_ID\r\n\t\t\tFROM\r\n\t\t\t\ttblMinions\r\n\t\t\tWHERE\r\n\t\t\t\tUserID = $userID\r\n\t\t\t\tAND requestID = -$ID\r\n\t\t\t\");\r\n\t\tif (!minionResults)\r\n\t\t\treturn -1;\r\n\t\tif (mysql_num_rows($minionResults) != 1)\r\n\t\t\treturn -1;\r\n\t\t$row = mysql_fetch_row($minionResults);\r\n\t\treturn $row[0];\r\n\t}\r\n\telse\r\n\t{\r\n\t\t//p_ID\r\n\t\t$minionResults = mysql_query(\"\r\n\t\t\tSELECT\r\n\t\t\t\tp_ID\r\n\t\t\tFROM\r\n\t\t\t\ttblMinions\r\n\t\t\tWHERE\r\n\t\t\t\tUserID = $userID\r\n\t\t\t\tAND p_ID = $ID\r\n\t\t\t\");\r\n\t\tif (!$minionResults)\r\n\t\t\treturn -1;\r\n\t\tif (mysql_num_rows($minionResults) != 1)\r\n\t\t\treturn -1;\r\n\t\t$row = mysql_fetch_row($minionResults);\r\n\t\treturn $row[0];\r\n\t}\r\n}\r\n?>" }, { "alpha_fraction": 0.5671405792236328, "alphanum_fraction": 0.5703001618385315, "avg_line_length": 13.875, "blob_id": "e6c20cacaacef39f61e6a46d546c8185821911c3", "content_id": "e09f4534e340674d88e39a16b48f92b1b3d99179", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "PHP", "length_bytes": 633, "license_type": "no_license", "max_line_length": 45, "num_lines": 40, "path": "/Kingdom/Cinder/Pilot/www/scripts/transaction_functions.php", "repo_name": "nyxojaele/Possibilities", "src_encoding": "UTF-8", "text": "<?php\r\n$output = array();\r\n\r\n\r\nfunction StartTransaction()\r\n{\r\n\tmysql_query(\"START TRANSACTION\");\r\n}\r\nfunction Output($variable, $value)\r\n{\r\n\tglobal $output;\r\n\t$output[$variable] = $value;\r\n}\r\nfunction CompleteTransaction($errorMessage)\r\n{\r\n\tglobal $output;\r\n\tif ($errorMessage)\r\n\t{\r\n\t\tmysql_query(\"ROLLBACK\");\r\n\t\tOutput(\"Success\", 0);\r\n\t\tOutput(\"Error\", \"'\" . $errorMessage . 
\"'\");\r\n\t}\r\n\telse\r\n\t{\r\n\t\tmysql_query(\"COMMIT\");\r\n\t\tOutput(\"Success\", 1);\r\n\t}\r\n\t$started = false;\r\n\tforeach ($output as $key => $value)\r\n\t{\r\n\t\tif (!$started)\r\n\t\t{\r\n\t\t\tprint \"$key=$value\";\r\n\t\t\t$started = true;\r\n\t\t}\r\n\t\telse\r\n\t\t\tprint \"&$key=$value\";\r\n\t}\r\n}\r\n?>" }, { "alpha_fraction": 0.6739299893379211, "alphanum_fraction": 0.6778210401535034, "avg_line_length": 21.399999618530273, "blob_id": "7431db485a8486fd9a78f631d99710ed306d4f10", "content_id": "ef56033a8e253b8ab7148c1da92172cc982a403a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "PHP", "length_bytes": 1285, "license_type": "no_license", "max_line_length": 90, "num_lines": 55, "path": "/Kingdom/Cinder/Pilot/www/register.php", "repo_name": "nyxojaele/Possibilities", "src_encoding": "UTF-8", "text": "<?php\r\n//Functions\r\ninclude_once 'scripts/account_functions.php';\r\ninclude_once 'scripts/session_functions.php';\r\n//Startup\r\ninclude_once 'scripts/connect_to_mysql.php';\r\n\r\n$username = $_POST['username'];\r\n$password = $_POST['password'];\r\n$success = 0;\r\n\r\n\r\n//Check DB Version\r\n$expectedDBVersion = $_POST['DBVersion'];\r\nif (!CheckDBVersion($expectedDBVersion))\r\n{\r\n\t//Invalid DB Version\r\n\tprint \"Success=$success&Error='Invalid DB version'\";\r\n\treturn;\r\n}\r\n\r\n\r\n//Check if username is being used already\r\nif (AccountExists($username) != 0)\r\n{\r\n print \"Success=$success&Error='Username already in use'\";\r\n return;\r\n}\r\n\r\n\r\n//Login\r\n$userID = CreateNewAccount($username, $password);\r\nif ($userID == -1)\r\n{\r\n\tprint \"Success=$success&Error='Unable to create new user'\";\r\n\treturn;\r\n}\r\n//There will never be an existing session for a new user, no sense in even checking for it\r\n$sessionID = CreateNewSession($userID);\r\nif ($sessionID == 0)\r\n{\r\n print \"Success=$success&Error='Could not acquire session'\";\r\n return;\r\n}\r\nif (!InitSession($userID, $sessionID))\r\n{\r\n\tprint \"Success=$success&Error='Could not init session'\";\r\n\treturn;\r\n}\r\n\r\n//We have a session, return it\r\nTouchSession($sessionID);\r\n$success = 1;\r\nprint \"Success=$success&SessionID=$sessionID&Username=$username\";\r\n?>" } ]
13
joshmutus/pylabrad-wiki
https://github.com/joshmutus/pylabrad-wiki
edab96be61483b37be724b8291d53fb5a6464f64
3cb7ca8ed50a76d73ae64018823d86dc124cd5c0
a4a3cdf33aaa92ecddc897cfb8a5308b570dfb02
refs/heads/master
2021-01-18T12:01:22.764994
2015-06-29T21:33:48
2015-06-29T21:33:48
38,547,770
0
0
null
2015-07-04T21:10:24
2015-06-29T21:31:16
2015-06-29T21:33:49
null
[ { "alpha_fraction": 0.6174863576889038, "alphanum_fraction": 0.6202185750007629, "avg_line_length": 29.5, "blob_id": "6ded7ef67fdbd05c88ffcb34601705c3a14aab71", "content_id": "9cc193c6c1bd8f199349cf553651ab810f38f172", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 366, "license_type": "no_license", "max_line_length": 75, "num_lines": 12, "path": "/synchronousclient_1.py", "repo_name": "joshmutus/pylabrad-wiki", "src_encoding": "UTF-8", "text": "import labrad\nimport time\n\ndef square_numbers(cxn, numbers):\n ss = cxn.squaring_server\n t_start = time.time()\n print(\"Starting synchronous requests...\")\n for n in numbers:\n square = ss.square(n)\n print(\"%f**2 = %f\"%(n, square))\n t_total = time.time() - t_start\n print(\"Finished %d requests after %f seconds.\"%(len(numbers), t_total))\n" }, { "alpha_fraction": 0.630723774433136, "alphanum_fraction": 0.6322008967399597, "avg_line_length": 32.849998474121094, "blob_id": "47fd6b2d2fee53b30b6517d49dadc9f9c0ebf86f", "content_id": "fddfc2310b0a1da8dfdda5e9a12af851a8fde3c0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 677, "license_type": "no_license", "max_line_length": 53, "num_lines": 20, "path": "/asynchronousclient_1.py", "repo_name": "joshmutus/pylabrad-wiki", "src_encoding": "UTF-8", "text": "import labrad\nimport time\n\ndef square_and_add(cxn, square_me, x, y):\n ss = cxn.squaring_server\n ads = cxn.addition_server\n t_start = time.time()\n print(\"Sending request to Squaring Server\")\n squared_future = ss.square(square_me, wait=False)\n print(\"Sending request to Addition Server\")\n summed_future = ads.add(x, y, wait=False)\n print(\"Waiting for results...\")\n squared = squared_future.wait()\n summed = summed_future.wait()\n print(\"done\")\n t_total = time.time() - t_start\n print(\"%f**2 = %f\"%(square_me, squared))\n print(\"%d + %d = %d\"%(x, y, summed))\n print(\"Total time taken = %f seconds.\"%(t_total))\n return squared, summed\n" }, { "alpha_fraction": 0.637535810470581, "alphanum_fraction": 0.6418337821960449, "avg_line_length": 29.34782600402832, "blob_id": "e0ef3c3072ca81337b6ed4256d9323d92412b1db", "content_id": "2a62a122b029aa75bff8327fde41e1fbdb1e30b1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 698, "license_type": "no_license", "max_line_length": 53, "num_lines": 23, "path": "/mathserver.py", "repo_name": "joshmutus/pylabrad-wiki", "src_encoding": "UTF-8", "text": "from labrad.server import LabradServer, setting\nfrom twisted.internet.defer import returnValue\n\nclass MathServer(LabradServer):\n name = \"Math Server\"\n\n @setting(10, x='v[]', y='v[]', returns='v[]')\n def add(self, context, x, y):\n addition_server = self.client.addition_server\n result = yield addition_server.add(x, y)\n returnValue(result)\n\n @setting(0, data='v[]', returns='v[]')\n def square(self, context, data):\n squaring_server = self.client.squaring_server\n result = yield squaring_server.square(data)\n returnValue(result)\n\n__server__ = MathServer()\n\nif __name__ == '__main__':\n from labrad import util\n util.runServer(__server__)\n" }, { "alpha_fraction": 0.6230452656745911, "alphanum_fraction": 0.6251028776168823, "avg_line_length": 96.23999786376953, "blob_id": "9007d40e997a2b17e19e7b1a4e77ab3b70627c9a", "content_id": "3f6ebb2ac05ecc4c33936241a2e0d9c05f754b6b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": 
"Markdown", "length_bytes": 2430, "license_type": "no_license", "max_line_length": 385, "num_lines": 25, "path": "/LabRAD-Data-Type-equivalents.md", "repo_name": "joshmutus/pylabrad-wiki", "src_encoding": "UTF-8", "text": "LabRAD data is specified by type tags that specify the binary format and interpretation of data transmitted over the network. pylabrad maps these data types to native python types, so data received from the network is automatically unflattened to the appropriate python type, and vice versa. \n\n| LabRAD type tag | Python data type | Notes |\n| :--------------:|:-----------------|:------|\n| b | True/False | Boolean value |\n| i | int | 32 bit signed integer |\n| w | long | 32 bit unsigned integer |\n| v[unit] | Value(x, 'unit') | Real number with physical units |\n| v[] | float | Dimensionless real / floating point value |\n| c[unit] | Complex(a+1j*b, 'unit') | Complex number with physical units |\n| c[] | complex | Dimensionless complex number |\n| v, c | float/complex | float/complex with unspecified units. Pending deprecation (***) |\n| *X | list/ndarray | list(*) of elements of type 'X' |\n| *nX | list/ndarray | n-dimensional list(*)/array of type 'X' |\n| (...) | tuple | cluster of elements with specified type |\n| t | datetime.datetime | Time stamp |\n| E, E? | Exception | Error message with optional payload(**) |\n| _ | None | Null type |\n| ? | n/a | Any labrad data(**) | \n\n(*) LabRAD list types unflatten as a LazyList -- a type that emulates a list but doesn't unflatten the raw data until you use it. If it is passed on to another labrad server, it need never be unflattened. In addition, if the list holds numeric data, the .asarray property unflattens it as a numpy ndarray which is much more efficient for large data structures.\n\n(**) ? is an incomplete data type. LabRAD servers can advertise ? to indicate they accept or return any type of labrad data, but it is always replaced with a concrete data type when transmitted over the wire.\n\n(***) v and c have two separate uses. One is as a type pattern for server settings, where it means \"this setting accepts values in any possible units.\" The other is in the wire protocol. If 'v' data is transmitted over the wire to a settings with a v[unit] type tag, the manager will 'convert' the data to the specified units. This use is deprecated and will be eventually removed." }, { "alpha_fraction": 0.7969924807548523, "alphanum_fraction": 0.7969924807548523, "avg_line_length": 87.66666412353516, "blob_id": "a6c6516b3628155a2e2d7436ef057ebfc107c56b", "content_id": "6b13d706734a0962258ea54d25cb65db83fb8752", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 266, "license_type": "no_license", "max_line_length": 103, "num_lines": 3, "path": "/README.md", "repo_name": "joshmutus/pylabrad-wiki", "src_encoding": "UTF-8", "text": "This is an experiment in using dedicated repos for wikis.\nWe do this to work around the fact that github doesn't support pull request on the built-in repo wikis.\nFor now the content here is not official in any capacity. 
Please see the wiki inside the pylabrad repo.\n" }, { "alpha_fraction": 0.737472414970398, "alphanum_fraction": 0.7412543296813965, "avg_line_length": 41.878379821777344, "blob_id": "d4439ebf60562899fad028431fb28c3af3e48b53", "content_id": "517b49f6f0bc6be9f2a3938af695d3f410bd9ab6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 3173, "license_type": "no_license", "max_line_length": 609, "num_lines": 74, "path": "/Writing-clients.md", "repo_name": "joshmutus/pylabrad-wiki", "src_encoding": "UTF-8", "text": "#### Direct interface with a server\n```python\ncxn = labrad.connect()\nmyServer = cxn.my_server\nresult = my_server.foo(args)\n```\n\n#### Use a \"packet\" to send multiple requests at once\n\n```python\np = cxn.my_server.packet()\np.foo(args)\np.bar(args)\nresult = p.send() # This is a blocking call\nanswer_to_foo = result['foo']\nanswer_to_bar = result['bar']\n```\n`result` is a dictionary keyed by the name of each setting. If you invoke the same setting multiple times in a single packet, the result is a list. Alternatively, you can set your own keys for each request:\n\n```python\np = cxn.my_server.packet()\np.foo(args, key='banana')\np.foo(args, key='orange')\nresult = p.send()\nanswer_foo1 = result['banana']\nanswer_foo2 = result['orange']\n```\n\n#### Send a request to a server without waiting for the answer\n\n```python\np = cxn.my_server.packet()\np.foo(args)\nresult_future = p.send(wait=False) # This is not a blocking call\n<other code>\n# Now we want to wait for the answer to our request.\n# Once this call completes, we will be able to do\n# result['foo'] to retrieve the result of our request.\nresult = result_future.wait()\nprint(\"result of foo is: \", result['foo'])\n```\n\n#### Contexts\n\nEach labrad request happens within a specific context. Servers use that context to store specific information about the client such as their current working directory (datavault, registry) or the currently selected GPIB device (any GPIB device server). Each client gets its own default context, which is normally all you need. To get a new context (for instance to avoid trampling over the working directory, or for keeping pipelined requests to the qubit sequencer separate):\n\n```python\nctx = cxn.context() # Returns a tuple like (0, 5)\np = cxn.my_server.packet(context=ctx)\np.foo(args)\np.send()\n```\n\n#### Signals\n\nLabrad servers can send signals. Signals are asynchronous notifications from servers that do not come as a reply to a particular request. Each client must register to receive a particular signal in order for it to be delivered. Setting up a client to receive a notification is a two-step process:\n\n```python\ndef signal_handler(message_ctx, data):\n    print(\"Got signal in context %s with data: %s\" % (message_ctx, data))\n\ndef set_listener(cxn, server, ctx=None):\n    notification_ID = 4444\n    cxn._backend.cxn.addListener(signal_handler, source=server.ID, context=ctx, ID=notification_ID)\n    server.signal_name.notify_on_change(notification_ID, True, context=ctx) # True enables notification\n```\n\nThe `addListener` call sets up the local client code so that when it receives a notification from a specific server with a specific ID and context, it dispatches it to the registered function. The `notify_on_change` call sends a message to the server telling it that we want to receive notifications, and that they should be sent to us with the ID and context specified. 
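\n\nFor the other side of this exchange, here is a minimal sketch of a server that declares and fires such a signal. The server and setting names are hypothetical, but `Signal` is the class pylabrad servers use to declare signals, and the attribute name matches the `signal_name` used above:\n\n```python\nfrom labrad.server import LabradServer, Signal, setting\n\nclass SignallingServer(LabradServer):\n    name = \"Signalling Server\"\n    # Signal(ID, name, type tag of the payload)\n    signal_name = Signal(543617, 'signal: signal_name', 's')\n\n    @setting(10, data='s')\n    def poke(self, c, data):\n        # Firing the signal delivers it to every listener that\n        # registered via notify_on_change.\n        self.signal_name(data)\n```\n\n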
The ID (`4444` here) serves much the same purpose as a server's setting ID. It is used to route each notification to the proper handler. It can be anything, but it must be unique: a client can't use the same notification ID for two messages.\n\n#### Debugging tips\n\n## Next\n\n[Writing servers](Writing-Servers.md)\n" }, { "alpha_fraction": 0.2675626277923584, "alphanum_fraction": 0.29749542474746704, "avg_line_length": 67.22916412353516, "blob_id": "8a83733f39cde8e4bbdd9663af6d88bccb1152cd", "content_id": "b78f99bbc8921170c98c01435acbfc45aae76627", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 3274, "license_type": "no_license", "max_line_length": 300, "num_lines": 48, "path": "/python----wire-type-conversion-rules.md", "repo_name": "joshmutus/pylabrad-wiki", "src_encoding": "UTF-8", "text": "LabRAD servers advertise type tags for each argument in their settings. These type tags specify what types of data are allowed to be passed to the setting. On the sending side, the pylabrad client must take a given python object and convert it to one of the types advertised by the recipient setting.\n\nHere we specify how various combinations of python objects and type tags should be handled by pylabrad. Python objects are written assuming the following imports\n\n```python\nfrom labrad.units import Hz, mHz\nfrom labrad.units import WithUnit\n```\n\n| LabRAD type tag | Python data | Value sent by pylabrad | Type tag sent by pylabrad |\n| :--------------:|:-----------------|:-----------------------|:--------------------------|\n| b | b = True/False | b | 'b' |\n| b | x = non-boolean | bool(x) | 'b' |\n| i | 3 | 3 | 'i' |\n| i | -4 | -4 | 'i' |\n| i | 4.4 | error | - |\n| w | 234 | 234 | 'w' |\n| w | -234 | error | - |\n| w | 2**40 | error | - |\n| 'v[Hz]' | 4.5*Hz | 4.5 Hz | 'v[Hz]' |\n| 'v[Hz]' | 4.5*mHz | 0.0045 Hz | 'v[Hz]' |\n| 'v[Hz]' | 4.5*s | error | - |\n| 'v[Hz]' | 4.5 | error | - |\n| 'v[]' | 4.5 | 4.5 | 'v[]' |\n| 'v[]' | 4.5*Hz | error | - |\n| 'v[]' | 4 | float(4) | 'v[]' |\n| 'v' | 4.5 | 4.5 | 'v[]' |\n| 'v' | 4.5*Hz | 4.5 Hz | 'v[Hz]' |\n| '?' | x = True/False | x | 'b' |\n| '?' | 5 | 5 | 'i' |\n| '?' | -5 | -5 | 'i' |\n| '?' | 5L | 5 | 'w' |\n| '?' | -5L | error / 5 (??) | -/'i' |\n| '?' | 5.0 | 5.0 | v[] |\n| '?' | WithUnit(5, '') | 5.0 | v[] |\n| '?' | 5.0*Hz | 5.0 | v[Hz] |\n| '*v' | [3*Hz, 5*km] | error | - |\n| '*v' | [3*Hz, 5*kHz] | [3.0, 5000.0] | v[Hz] |\n| '*v[kHz]' | [3*Hz, 5*kHz] | [.003, 5.0] | v[kHz] |\n| '*i' | array(x,np.int32) | x | *i |\n| '*i' | array(x,np.int64) | x (*) | *i |\n| '*i' | array(x,np.uintNN) | x (*) | *i |\n| '*w' | array(x,np.intNN) | x (*) | *w |\n| '*w' | array(x,np.uintNN) | x (*) | *w |\n| '*?' | array(x,np.intNN) | x (*) | *i |\n| '*?' | array(x,np.uintNN) | x (*) | *w |\n\n(*) Numpy integer type (int32,int64,uint32,uint64) conversions to 'i'/'w' may be subject to bounds checking." 
}, { "alpha_fraction": 0.7536916732788086, "alphanum_fraction": 0.7594014406204224, "avg_line_length": 53.6236572265625, "blob_id": "b1333a4580aeb08d635d13c06f677da357ffc7ba", "content_id": "b04f07f54c8da00a743ee6fa68710a37dbfac53e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 5079, "license_type": "no_license", "max_line_length": 511, "num_lines": 93, "path": "/Home.md", "repo_name": "joshmutus/pylabrad-wiki", "src_encoding": "UTF-8", "text": "# pylabrad: A Python interface to LabRAD\n\npylabrad is a python package to provide an interface to the LabRAD system, a remote procedure call protocol designed for scientific datataking.\n\n## Required Packages\n\nTo get started with pylabrad, you'll first need to install some software:\n\n1. **Python** version 2.7 or greater (Stay tuned for Python 3.x support). Python is, in our opinion, one of the best programming languages out there. It's easy to learn and easy to use, and helps you get more done faster.\n1. **Twisted** version 2.5 or greater. Download it [here](https://twistedmatrix.com/trac/). Twisted is a networking framework for python that we use to handle all the low-level networking stuff for pylabrad.\n1. **pylabrad**. Install from source, or use pip to get it from PyPI.\n\n## Python Add-ons\n\nWe also recommend souping up your python installation with a few other packages to enhance the experience of using python/pylabrad:\n\n1. **IPython**. A replacement for the python shell that provides loads of great features. Our favorite is definitely tab-completion, which makes controlling labrad from the command line a cinch. We'll show examples of IPython usage in the rest of this tutorial.\n1. **Numpy/Scipy**. These packages provide incredible numerical and scientific computing capability for python. Numpy array objects can be used in pylabrad to accelerate operations on numeric data. Also check out the SciPy website for links to other great software for scientific computing with python.\n1. **matplotlib**. Very nice, MATLAB-style plotting for python.\n\n## Getting Started\n\nAs a first step, look through the basic LabRAD tutorial. This will help you get the LabRAD manager up and running, which you need to do before continuing. Also, make sure you've installed the software above. Then we're ready to go.\n\nFire up your python shell, and import the labrad package:\n```python\n>>> import labrad\n```\n\nNow we can establish a connection to the LabRAD manager. We need to know where the manager is running in order to connect to it. Let's suppose that the manager is running on your local machine, then you would type:\n```python\n>>> cxn = labrad.connect('localhost') \n```\n\n(We can set up pylabrad with defaults so that we don't need to specify the hostname every time we connect; see [ConfiguringDefaults] for more information.)\nThis command created a connection to the LabRAD system, and assigned the connection object to a variable called `cxn`. This object is our gateway to LabRAD. Most of the objects in pylabrad have informative string representations, which will be printed out when that object is entered by itself on the command line:\n```python\n>>> cxn\nLabRAD Client: 'Python Client' on localhost:7682\n\nAvailable servers:\n manager\n registry\n```\n\nThe list shows the servers that are logged in to LabRAD and available for us to talk to them. You may see more servers in the list, depending on what is logged in, but you will see at least the manager and registry. 
These available servers can be accessed as attributes of the connection object, or looked up by name like dictionary entries:\n```python\n>>> cxn['manager']; cxn.manager\nLabRAD Server: manager (ID=1)\n\nThe LabRAD Manager handles the interactions between parts of the LabRAD system.\n\nSettings:\n    blacklist\n    convert_units\n    data_to_string\n    expire_context\n    help\n    lookup\n    lr_settings\n    notify_on_connect\n    notify_on_disconnect\n    s__notify_on_context_expiration\n    s__register_setting\n    s__start_serving\n    s__unregister_setting\n    servers\n    string_to_data\n    whitelist\n```\n\nFinally we can talk to a specific setting on the server. The `data_to_string` setting will take any valid LabRAD data and return a string version of it, somewhat like python does with its `repr` and `str` functions. We can get information about this setting by entering it, just like connection and server objects:\n```python\n>>> cxn.manager['data_to_string']; cxn.manager.data_to_string\nLabRAD Setting: manager.data_to_string (ID=12345)\n\nReturns a string representation of the data sent to it.\n\nAccepts:\n\nReturns:\n    s\n\nThis setting is primarily meant for test-purposes.\n```\n\nThis gives some documentation provided by the creator of the server, and also tells us what data types the setting accepts and returns. In the case of the data-to-string setting, any type is acceptable, so the Accepts list is empty. Whatever we pass in, a string will be returned. The setting can be called just like any other method on a python object, except that behind the scenes a request is made over the network to the server where the request is executed and the response comes back. Let's try this out:\n```python\n>>> cxn.manager.data_to_string([(1, 'This'), (2, 'is'), (3, 'a'), (4, 'test.')])\n\"[(+1, 'This'), (+2, 'is'), (+3, 'a'), (+4, 'test.')]\"\n```\n\nCongratulations! You now know how to connect to LabRAD from python, find servers and settings, get information about them, and call them over the network. Using just these tools, you should be able to browse and communicate with your entire LabRAD network." }, { "alpha_fraction": 0.800000011920929, "alphanum_fraction": 0.800000011920929, "avg_line_length": 20, "blob_id": "14e3e620ecb35840d5026151644c680b4a938d19", "content_id": "b44883847561fedc3c2794e5850c417b5820196e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 20, "num_lines": 1, "path": "/Type-dac-annotations.md", "repo_name": "joshmutus/pylabrad-wiki", "src_encoding": "UTF-8", "text": "Needs to be written." 
}, { "alpha_fraction": 0.7747252583503723, "alphanum_fraction": 0.7754120826721191, "avg_line_length": 82.22856903076172, "blob_id": "1b9f938eed33525128969d5be3f58d4d109895d6", "content_id": "504a83b380eb19b5f59fb0704948988a7d4bcd97", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2912, "license_type": "no_license", "max_line_length": 545, "num_lines": 35, "path": "/The-Node.md", "repo_name": "joshmutus/pylabrad-wiki", "src_encoding": "UTF-8", "text": "The LabRAD Node is a server that is used for starting and stopping other servers.\n\nThere are three files that are relevant to the node:\n* [The node itself](https://github.com/martinisgroup/pylabrad/blob/master/labrad/node/__init__.py)\n* [The twisted plugin for the node](https://github.com/martinisgroup/pylabrad/blob/master/twisted/plugins/labrad_node.py)\n* [The script to easily start the node](https://github.com/martinisgroup/pylabrad/blob/master/scripts/labradnode.py)\n\nEach of these is documented (in the module docstring, at least) with their relevant purposes and requirements (including e.g. registry keys).\n\n## Running the Node\n\nThe node is currently implemented as a Twisted plugin. As long as twisted.plugins.labrad_node is in your pythonpath, you can run the plugin like this:\n\n twistd -n labradnode\n\nwhere -n means \"don't daemonize\" and `labradnode` is the name of the plugin. (Note that either installing pylabrad with setup.py or putting the labrad/ folder in your pythonpath is sufficient.) Running the node as a plugin is handy because if you restart the manager, it will automatically attempt to reconnect.\n\nThere is also a new script in the scripts folder of pylabrad, labradnode.py. All this does is the equivalent of the `twistd` command given earlier. This makes it easy to start the node without a command line (e.g. in Windows).\n\n## Using the Node\n\nEach node has a name--either the environment variable LABRADNODE or the system's hostname. \n\nYou must point the node to the folder where your servers are stored. There is a folder in the registry for the node, `>> Nodes >> [node name]`, with the key \"Directories\", which is a list of directories (strings) that the node looks to for servers. (This registry directory may be created automatically when the node is first run with a given node name.)\n\nThe node is fairly self-explanatory to use; key settings are `available_servers`, `running_servers`, `start`, `stop`, `status`, etc.\n\n## Modifying the Node\n\nNote that if you change the node such that there's a syntax error (i.e. so that the file errors on import) then the plugin will not show up in the list of twisted plugins (from `twistd --help`). You can check for import errors by simply doing `import labrad.node`.\n\nThe twisted plugin docs are [here](https://twistedmatrix.com/documents/current/core/howto/tap.html).\n\n## Known issues\nIf the node can start a server but then can't find it after it has started it (this manifests in the browser as an eternally spinning icon) then you probably just have the names mismatched. Python class variable for the server name must match the name in the node info section of the server. See the documentation [here](https://github.com/martinisgroup/pylabrad/blob/master/labrad/node/__init__.py#L48) and the ini file documentation [here](https://github.com/martinisgroup/pylabrad/blob/master/labrad/node/launchable-server.ini) for more info." 
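The workflow described in The-Node.md can be summarized with a hypothetical client session. The node name `mynode` and the server directory path are placeholder assumptions; the registry and node settings used (`cd`, `set`, `available_servers`, `start`, `running_servers`) are the ones named in the page above.

```python
# Hypothetical session: configure and drive a node named 'mynode'.
import labrad

cxn = labrad.connect()

# Point the node at a folder of servers via the registry key it reads.
reg = cxn.registry
reg.cd(['', 'Nodes', 'mynode'], True)         # True: create the folder if missing
reg.set('Directories', ['/home/me/servers'])

# The node registers under the name 'node <node name>'.
node = cxn['node mynode']
print(node.available_servers())
node.start('My Server')
print(node.running_servers())
```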
}, { "alpha_fraction": 0.6174863576889038, "alphanum_fraction": 0.6284152865409851, "avg_line_length": 20.52941131591797, "blob_id": "ac0ea95bd9eed06f686590c30d57b1ecd7fdaa37", "content_id": "e97c26f1699ec90713c8654bd0246b0b03c4edda", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 366, "license_type": "no_license", "max_line_length": 47, "num_lines": 17, "path": "/squaringserver.py", "repo_name": "joshmutus/pylabrad-wiki", "src_encoding": "UTF-8", "text": "import time\n\nfrom labrad.server import LabradServer, setting\n\nclass SquaringServer(LabradServer):\n name = \"Squaring Server\"\n\n @setting(10, data='v[]', returns='v[]')\n def square(self, c, data):\n time.sleep(2)\n return data**2\n\n__server__ = SquaringServer()\n\nif __name__ == '__main__':\n from labrad import util\n util.runServer(__server__)\n" }, { "alpha_fraction": 0.6293772459030151, "alphanum_fraction": 0.6377058625221252, "avg_line_length": 33.535945892333984, "blob_id": "fc31c377e4950f21b047649468ff1b40b29e4c6d", "content_id": "33e372752f6f04e6327639709a5ec51e5ae60c56", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 5283, "license_type": "no_license", "max_line_length": 230, "num_lines": 153, "path": "/Pylabrad-Signal-Slot-Example.md", "repo_name": "joshmutus/pylabrad-wiki", "src_encoding": "UTF-8", "text": "### How to emit a signal in a server\n```python\n #Import Signal class\n from labrad.server import Signal\n\n # Create a LabRAD Signal object.\n onEvent = Signal( ID, signal_name, data_type)\n\n # e.g. ID = 543617. The ID can be any number between X and XX\n # The ID number can not have a leading 0.\n\n # signal_name = 'signal: interesting information acquired'\n # This signal name is not trivial, it is what the client uses to connect to the \n #signal in this instance the client would call \n #server.signal__interesting_information_acquired() note the parsing of : and \n #spaces.\n # data_type = 'i' for integer data. 
These are the LabRAD data types \n    #(see link below for labrad data types).\n\n    # Note the onEvent Signal must be instantiated as a global class variable, i.e.:\n\n    class ourserver( LabradServer):\n        onEvent = ...\n        ...\n        def initServer(self):\n        ...\n```\n\n\n### A simple example of a server emitting a signal\n\n```python\n\"\"\"\n### BEGIN NODE INFO\n[info]\nname = Emitter Server\nversion = 1.0\ndescription = \ninstancename = EmitterServer\n\n[startup]\ncmdline = %PYTHON% %FILE%\ntimeout = 20\n\n[shutdown]\nmessage = 987654321\ntimeout = 20\n### END NODE INFO\n\"\"\"\n\nfrom labrad.server import LabradServer, setting, Signal\nfrom twisted.internet import reactor\nfrom twisted.internet.defer import inlineCallbacks\nimport labrad\n\nclass EmitterServer(LabradServer):\n    \"\"\"\n    Basic Emitter Server\n    \"\"\"\n    name = 'Emitter Server'\n\n    onEvent = Signal(123456, 'signal: emitted signal', 's')\n    #This is the Signal to be emitted, with ID# 123456; the name for the\n    #client to call is signal__emitted_signal and the labrad type is string\n\n    @setting(1, 'Emit Signal', returns='')\n    def emitSignal(self, c):\n        #uses onEvent to send the signal to all listeners\n        self.onEvent('Output!')\n\nif __name__ == \"__main__\":\n    from labrad import util\n    util.runServer(EmitterServer())\n```\n\n### How to receive a signal in a simple client\n\nNow let's create a client that will listen for the string signal from the previous example (note this follows the writing GUI clients format closely and it may be beneficial to finish the GUI client tutorial prior to this tutorial)\n```python\nfrom twisted.internet.defer import inlineCallbacks\nfrom PyQt4 import QtGui\n\nclass recieverWidget(QtGui.QWidget):\n\n    ID = 654321\n    #this is an ID for the client to register to the server\n\n    def __init__(self, reactor, parent=None):\n        super(recieverWidget, self).__init__(parent)\n        self.reactor = reactor\n        self.setupLayout()\n        self.connect()\n\n    def setupLayout(self):\n        #setup the layout and make all the widgets\n        self.setWindowTitle('Receiver Widget')\n        #create a horizontal layout\n        layout = QtGui.QHBoxLayout()\n        #create the text widget\n        self.textedit = QtGui.QTextEdit()\n        self.textedit.setReadOnly(True)\n        layout.addWidget(self.textedit)\n        self.setLayout(layout)\n\n    @inlineCallbacks\n    def connect(self):\n        #make an asynchronous connection to LabRAD\n        from labrad.wrappers import connectAsync\n        cxn = yield connectAsync(name = 'Signal Widget')\n        self.server = cxn.emitter_server\n        #connect to emitter server\n        yield self.server.signal__emitted_signal(self.ID)\n        #connect to the signal from the server (note the method name is\n        #parsed from the signal name declared in the emitter server)\n        yield self.server.addListener(listener = self.displaySignal,\n                        source = None, ID = self.ID)\n        #This registers the client as a listener to the server and assigns a\n        #slot (function) from the client to the signal emitted from the server,\n        #in this case self.displaySignal\n\n    def displaySignal(self, cntx, signal):\n        self.textedit.append(signal)\n\n    def closeEvent(self, x):\n        #stop the reactor when closing the widget\n        self.reactor.stop()\n\nif __name__==\"__main__\":\n    #join Qt and twisted event loops\n    a = QtGui.QApplication( [] )\n    import qt4reactor\n    qt4reactor.install()\n    from twisted.internet import reactor\n    widget = recieverWidget(reactor)\n    widget.show()\n    reactor.run()\n```\n### Let's try it out\n\nRun the server and make sure it is listed in your LabRAD manager as \"Emitter Server\". 
\n\nRun the client, the GUI text box should be displayed and blank. The title of the window should be \"Receiver Widget\". When the server emits a signal we expect the text box to show \"Output!\".\n\nIn order for the server to emit the signal we must call the function emitSignal() from the emitter server. Open a python terminal, import labrad and connect to the server\n```python\n import labrad\n cxn = labrad.connect(name='python terminal')\n emitterserver = cxn.emitter_server\n emitterserver.emit_signal()\n```\nYou should now see the text 'Output!' on your client GUI" }, { "alpha_fraction": 0.6069363951683044, "alphanum_fraction": 0.6083815097808838, "avg_line_length": 30.454545974731445, "blob_id": "dea09f979f2ef285bc9af55ff9d0f101f3a5b462", "content_id": "afe1941dbb6982fdc18fd8e4b022abde90bd51a8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 692, "license_type": "no_license", "max_line_length": 54, "num_lines": 22, "path": "/synchronousclient_2.py", "repo_name": "joshmutus/pylabrad-wiki", "src_encoding": "UTF-8", "text": "import labrad\nimport time\n\ndef square_and_add(cxn, square_me, x, y):\n ss = cxn.squaring_server\n ads = cxn.addition_server\n t_start = time.time()\n \n print(\"Sending request to Squaring Server\")\n squared = ss.square(square_me)\n t_square = time.time()\n print(\"Got result %f**2 = %f after %f seconds\"%\\\n (square_me, squared, t_square-t_start))\n \n print(\"Sending request to Addition Server\")\n summed = ads.add(x, y)\n t_summed = time.time()\n print(\"Got result %d + %d = %d after %f seconds\"%\\\n (x, y, summed, t_summed - t_square))\n t_total = t_summed - t_start\n print(\"Total time taken = %f seconds.\"%(t_total,))\n return squared, summed\n" }, { "alpha_fraction": 0.6593707203865051, "alphanum_fraction": 0.7001367807388306, "avg_line_length": 38.30107498168945, "blob_id": "353a4ff4726abbc903ba034d9ab2db31dc4b043f", "content_id": "f3f02226730873e47f74ba1f246db100bfc7f389", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 3655, "license_type": "no_license", "max_line_length": 590, "num_lines": 93, "path": "/Units.md", "repo_name": "joshmutus/pylabrad-wiki", "src_encoding": "UTF-8", "text": "### Introduction\npylabrad supports units: physical quantities with a unit such as meter or gigahertz. 
It is required to use units when invoking labrad settings that specify units (type tag v[Hz] or similar), but it is recommended that you use them in all of your code to help avoid bugs like those that caused the crash of the [Mars Climate Orbiter](http://en.wikipedia.org/wiki/Mars_Climate_Orbiter).\n\n### Examples\n\n```python \n>>> import labrad.units as U\n>>> from labrad.units import GHz, MHz, Hz, ms, us, ns, km, m, mm\n\n>>> x = U.Value(10.0, 'm') # Construct the quantity 10 meters\n>>> print x\n10.0 m\n>>> x.inUnitsOf('mm') # Converts the units and returns a Value with the new units\nValue(10000.0, 'mm')\n>>> print x['mm'] # Extracts the floating point value 10000.0 -- x in millimeters\n10000.0\n\n>>> y = x / (2 * ns) # 5 meters / nanosecond\n>>> y\nValue(5.0, 'm/ns')\n>>> print y['m/s']\n4999999999.999999\n\n>>> print 3*mm + 1*m \n1003.0 mm\n\n>>> (5*Hz)**2\nValue(25.0, 'Hz^2')\n\n>>> (5*Hz)**(1.0/2)\nValue(2.23606797749979, 'Hz^1/2')\n\n>>> (5*Hz)**(2.0/3)\n...\nTypeError: Only integer or rational exponents allowed\n\n>>> ((5*Hz)**(1.0/3))**2\nValue(2.9240177382128656, 'Hz^2/3')\n```\nDimensioned quantities can be constructed either using the Value constructor (interchangeable with WithUnit or Complex), or by multiplying by the \"unit\" objects imported from labrad.units. Basic arithmetic works as you expect: * and / multiply or divide units while + and - require the arguments to be in compatible units and do appropriate conversion.\n\nNumpy ndarrays can also be given units (currently in the new_units development branch) in the same fashion.\n\n```python\n>>> import labrad.units as U\n>>> m = U.Unit('m')\n\n>>> x = np.array([1, 2, 3, 4, 5]) * m\n>>> x['cm']\narray([100, 200, 300, 400, 500])\n>>> x[1:3]\nValueArray(array([2., 3.]), 'm') \n```\n\nAny expression involving units which results in a dimensionless quantity (all of the base units cancel) generates an ordinary float/complex/ndarray which can be passed to any numeric function. For instance `sin(2*np.pi*w*t)` will work properly if `w` and `t` have the appropriate units, but `sin(2*np.pi*t)` will not -- you cannot compute the sin of 3 seconds. You can also get bare numbers out by using the x['ns'] notation. Note that radians are a valid unit, but they don't get special treatment. If you have quantities in radians you will need to use [] to extract a floating point.\n\n```python\n>>> x = 0.5*np.pi*rad\n>>> np.sin(x) # raises an exception!\nTraceback (most recent call last):\n File \"<stdin>\", line 1, in <module>\nAttributeError: sin\n>>> np.sin(x['rad'])\n1.0\n```\n\n### Methods\n\nIn addition to the arithmetic operations, dimensioned quantities support the following methods and properties\n\n**unit**<br>\n The quantity's units as a Unit object\n\n**units**<br>\n A property that returns the quantity's units as a string\n\n**inUnitsOf**(_unit_)<br>\n Convert the quantity to the specified unit and return an object with the new units. This always returns the same type as the original (Value, Complex, ValueArray)\n\n**inBaseUnits**(_unit_)<br>\n Convert to SI base units\n\n**isCompatible**(_unit_)<br>\n Test to see if the value could be successfully converted to the specified units.\n\n**isDimensionless**()<br>\n Test to see if the units cancel. 
If this is true, you can treat the number just like a float/complex/array\n\n**sqrt**()<br>\n  Take the square root of the number including units\n\n**__getitem__**(_unit_)<br>\nThe indexing operator [] is used to convert to the specified units and then return that value as a base type (with no units attached). For instance (5*MHz)['Hz'] == 1000000.0.\n" }, { "alpha_fraction": 0.7272471189498901, "alphanum_fraction": 0.7306283712387085, "avg_line_length": 53.75757598876953, "blob_id": "d2bd372cede81716713ea90ca6698e404a3b1875", "content_id": "21c8152ca8b88bc5ec633b0dfa1ac18a2456cd6c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 3549, "license_type": "no_license", "max_line_length": 612, "num_lines": 66, "path": "/Writing-Servers.md", "repo_name": "joshmutus/pylabrad-wiki", "src_encoding": "UTF-8", "text": "#### Basics of pylabrad servers\n\nYou can write a server simply by subclassing the LabradServer class and using the `@setting` decorator\n\n```python\nfrom labrad.server import LabradServer, setting\nfrom twisted.internet.defer import inlineCallbacks, returnValue\n\nclass MyServer(LabradServer):\n    name = \"My Server\"  # Will be labrad name of server\n    \n    @inlineCallbacks\n    def initServer(self):  # Do initialization here\n        pass\n\n    @setting(10, data='?', returns='b')\n    def is_true(self, c, data):\n        return bool(data)\n\n__server__ = MyServer()\n\nif __name__ == '__main__':\n    from labrad import util\n    util.runServer(__server__)\n```\n#### Servers acting as clients\n\nMany servers need to make requests to other servers. Each server has a 'client' object for this purpose:\n\n```python\n    @setting(15, key='s', returns='?')\n    def get_registry_key(self, c, key):\n        p = self.client.registry.packet()\n        p.get(key)\n        result = yield p.send() # Always wait=False\n        returnValue(result['get'])\n```\n\nNotice that servers always make asynchronous requests, so we must use yield to get the value of the Deferred. We then must use returnValue to send the result back, just as if this were an inlineCallbacks method.\n#### Setting decorator\n\nThe setting decorator takes a number of options. The first, required option is the setting ID number. The only requirement is that this must be a positive integer, and it must be unique within the server. The second option is optional, and is the name of the setting to be advertised to the manager. If left off, the name will be derived from the function name. The remaining keyword options are the argument names and types, with the 'returns' keyword argument specifying the return type:\n\n```python\n    @setting(10, 'cd', path=['s', '*s', 'w'], returns='*s') # 'cd' is optional and redundant\n    def chdir(self, c, path=None): # Path can also be unspecified.\n        '''Code goes here'''\n```\nType tags can be specified as a string or a list of strings. The setting decorator will inspect the method signature for default arguments, in which case it will generate additional type tags allowing the argument to be missing.\n\n#### Contexts\n\nThe second argument to every setting function (after self) is the context, usually called `c`. This allows the server to store state on a per-client basis. It acts like a dictionary in which the server implementation is allowed to store arbitrary keys. It also has the attribute `c.ID` containing the ID of the client making the request. There are two special methods that a server can override: `initContext(self, c)` and `expireContext(self, c)`. 
These are called the first time a client uses a specific context, and when the context expires (usually because the client disconnected from the labrad manager).\n\n#### Signals\n\nLabRAD supports signals. These are messages sent by servers triggered by an external event, rather than as a response to a specific client request. For instance, the data vault sends a signal to every listening client when a new file is created. This allows e.g. the grapher to update its display without polling the server. Signals are declared in pylabrad servers like so:\n\n```python\nfrom labrad.server import LabradServer, Signal, setting\n\nclass SignalTestServer(LabradServer):\n    onNotification = Signal(1234, 'signal: test', 's')\n    @setting(10, message='s')\n    def notify_clients(self, c, message):\n        self.onNotification(message) # send the message to all listening clients\n```\n" }, { "alpha_fraction": 0.7214241027832031, "alphanum_fraction": 0.7386008501052856, "avg_line_length": 33.43010711669922, "blob_id": "7a41f61ecddc5ed388056eec4fec495847f85574", "content_id": "626afcf8e9581ae32116436cd9210689f8a2af60", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 9606, "license_type": "no_license", "max_line_length": 94, "num_lines": 279, "path": "/Asynchronous-programming-HOWTO.md", "repo_name": "joshmutus/pylabrad-wiki", "src_encoding": "UTF-8", "text": "### Purpose\n\nIn this HOWTO, we investigate asynchronous behavior in pylabrad servers and\nclients.\n\n### Synchronous client\n\nWe begin with an example server, [the Squaring Server](squaringserver.py).\nThe Squaring Server has exactly one method, `square`, which computes the\nsquare of a number and returns the result.\nTo simulate a long processor bound computation, we have inserted a time\ndelay into the `square` setting.\nWith the LabRAD manager running, fire up the Squaring Server on your machine.\nThen, from the examples directory, start a python shell.\nIn the interactive shell, type:\n\n```python\nimport labrad\ncxn = labrad.connect()\nss = cxn.squaring_server\nss.square(1.414)\n>>> 1.999...\n```\n\nYou will have noted that when you hit `ENTER` on the `ss.square(1.414)`\nline, there is an approximately 2 second delay before the command\nfinishes and the result comes back.\nThis is due to the 2 second delay in the `square` setting.\nNow we will ask the server to square two numbers, one after the other.\nThis time we write our commands as a script.\nYou can find the script in [synchronousclient_1.py](synchronousclient_1.py),\nwhich is reproduced here:\n\n```python\nimport labrad\nimport time\n\ndef square_numbers(cxn, numbers):\n    ss = cxn.squaring_server\n    t_start = time.time()\n    print(\"Starting synchronous requests...\")\n    for n in numbers:\n        square = ss.square(n)\n        print(\"%f**2 = %f\"%(n, square))\n    t_total = time.time() - t_start\n    print(\"Finished %d requests after %f seconds.\"%(len(numbers), t_total))\n```\n\nIn the interactive session, import it and run the `square_numbers` function:\n\n```python\nimport synchronousclient_1 as sc1\nsc1.square_numbers(cxn, (1, 2))\n```\n\nYou should see the following output\n\n```python\n>>> Starting synchronous requests...\n>>> 1.000000**2 = 1.000000\n>>> 2.000000**2 = 4.000000\n>>> Finished 2 requests after 4.0 seconds.\n```\n\nEach `square` call on the Squaring Server takes 2 seconds, so our client\nfunction, which invokes `square` twice, takes 4 seconds to run.\nThis is an example of \"synchronous\" behavior: one task must finish\nbefore the next can 
begin.\nWith the squaring operation, this is more or less unavoidable.\nSquaring a number requires using a limited physical resource, the CPU.\nWhile the CPU is busy squaring a number, there's no way for it to\nsimultaneously square another number (we're pretending for the moment\nthat your computer has only one processor core).\n\nOperations, like number squaring, for which the resource bottleneck is\nthe local hardware (i.e. the CPU) are called \"CPU bound\".\nWhen you have a CPU bound operation (and only one core available) you\ncan't do anything to get work done faster.\nYou've got one CPU and only one operation can use it at a time.\n\nNow suppose we have an [Addition Server](additionserver.py), whose only\nsetting, `add`, computes the sum of two numbers.\nLike the `square` setting on the Squaring Server, `add` takes a bit of\ntime to complete.\nLet's see what happens if we try to get both the Squaring Server and the\nAddition Server to serve requests at the same time.\nFire up the Addition Server on your local machine.\nImport [synchronousclient_2.py](synchronousclient_2.py) and run its `square_and_add` function.\nHere's a copy/paste of synchronousclient_2.py:\n\n```python\nimport labrad\nimport time\n\ndef square_and_add(cxn, square_me, x, y):\n    ss = cxn.squaring_server\n    ads = cxn.addition_server\n    t_start = time.time()\n    \n    print(\"Sending request to Squaring Server\")\n    squared = ss.square(square_me)\n    t_square = time.time()\n    print(\"Got result %f**2 = %f after %f seconds\"%\\\n        (square_me, squared, t_square - t_start))\n    \n    print(\"Sending request to Addition Server\")\n    summed = ads.add(x, y)\n    t_summed = time.time()\n    print(\"Got result %d + %d = %d after %f seconds\"%\\\n        (x, y, summed, t_summed - t_square))\n    t_total = t_summed - t_start\n    print(\"Total time taken = %f seconds.\"%(t_total,))\n    return squared, summed\n```\n\nIn the interactive session, type the following:\n\n```python\nimport synchronousclient_2 as sc2\nsc2.square_and_add(cxn, 1.414, 2, 5)\n```\n\nYou should see output something like this:\n\n```python\nSending request to Squaring Server\nGot result 1.414**2 = 1.99 after 2.004 seconds\nSending request to Addition Server\nGot result 2 + 5 = 7 after 1.004 seconds\nTotal time taken = 3.008 seconds.\n>>> (1.999, 7.0)\n```\n\nIf you look in the Addition Server's code, you'll see that the `add`\nsetting has a 1 second delay to simulate the time needed by an intense\ncomputation.\nCombined with the 2 seconds needed by the Squaring Server, this gives a\ntotal 3 seconds needed for our `square_and_add` function.\n\nIn python, each line of code must complete before the next one can\nexecute.\nIn [synchronousclient_2](synchronousclient_2.py), the line\n\n```python\nsquared = ss.square(square_me)\n```\n\nhas to finish before the subsequent line invokes the Addition Server.\nCalls like this, which require some computation to finish before the\nprogram can move on, are called \"blocking\".\nIn other words, invoking and waiting for the Squaring Server \"blocks\" the\nprogram from moving forward.\nTo be more efficient, we need to send off our request to the Squaring\nServer and _not wait_ for the result before sending our request to the\nAddition Server.\n\n### Asynchronous client\n\nConsider the order of events in [synchronousclient_2](synchronousclient_2.py).\nFirst, we ask the Squaring Server to square 1.414.\nThe Squaring Server receives our request, processes it over a period of 2\nseconds, and then sends the result back to the client (our local python\nshell).\nDuring this 
time, the Addition Server is doing absolutely nothing.\nWe send our request to the Addition Server only after we get a response\nfrom the Squaring Server.\nSuppose the Squaring and Addition servers were on two different\ncomputers.\nIn that case, waiting for the Squaring Server to respond before\nsending a request to the Addition Server makes no sense.\nThe answer to \"2+5\" has nothing to do with the result of 1.414**2,\nso we might as well get both computations started at the same time.\nIn pylabrad, this is easy.\nWe tell pylabrad to not wait for the result of a server request by setting\n`wait=False` in the request:\n\n```python\nrequest = squaring_server.square(1.414, wait=False)\n```\n\nThis makes a request to the `square` setting but does not wait for the\nresult before going to the next line of code.\nTry it yourself in the interactive session.\nYou'll notice that the line completes immediately.\nSince the line completes immediately, but we know that the `square`\nsetting takes 2 seconds to complete, the value of `request` must not\nactually be the result of `1.414**2`.\nIn fact, the result of a LabRAD setting called with `wait=False` is an\nobject which represents the data to be returned at some point in the\nfuture.\nTry typing\n\n```python\ntype(request)\n```\n\nat the interactive session to see for yourself.\nYou'll see that `request` is a `labrad.backend.Future`.\nBehind the scenes, the part of pylabrad which deals with network\ncommunication waits for the data from the Squaring Server to come\nback, and when it does, it updates the `request` object with the\nreturned data.\nTo explicitly wait for this data you can call `.wait()` on `request`.\n\n```python\nsquared = request.wait()\n```\n\nThe `wait()` call blocks until the result is received from the Squaring Server,\nat which point it returns that result and stores it in `squared`.\nObjects representing results which may come later are called \"futures\" in\ncomputer programming (hence the name `labrad.backend.Future`).\n\nThe `.wait()` call is blocking.\nWhen we call `.wait` on a future, python will not go to the next line\nuntil the result of the future is available.\n\nWe can use futures to make our two requests to the Squaring and Addition\nservers run faster.\nWe ask the Squaring Server to run `square`, using `wait=False`.\nThen, while that result is being computed, we can immediately ask the\nAddition Server to run `add`, again with `wait=False`.\nBoth servers will start cranking away at their respective computations.\nWe then call `.wait()` on the two resulting futures in any order to\ncollect the results.\nThe code to do this is in [asynchronousclient_1.py](asynchronousclient_1.py),\nwhich is reproduced here:\n\n```python\nimport labrad\nimport time\n\ndef square_and_add(cxn, square_me, x, y):\n    ss = cxn.squaring_server\n    ads = cxn.addition_server\n    t_start = time.time()\n    print(\"Sending request to Squaring Server\")\n    squared_future = ss.square(square_me, wait=False)\n    print(\"Sending request to Addition Server\")\n    summed_future = ads.add(x, y, wait=False)\n    print(\"Waiting for results...\")\n    squared = squared_future.wait()\n    summed = summed_future.wait()\n    print(\"done\")\n    t_total = time.time() - t_start\n    print(\"%f**2 = %f\"%(square_me, squared))\n    print(\"%d + %d = %d\"%(x, y, summed))\n    print(\"Total time taken = %f seconds.\"%(t_total))\n    return squared, summed\n```\n\nTo run it, in the interactive session, do this:\n\n```python\nimport asynchronousclient_1 as ac1\nac1.square_and_add(cxn, 1.414, 2, 
5)\n```\n\nYou should see output like\n\n```python\nSending request to Squaring Server\nSending request to Addition Server\nWaiting for results...\ndone\n1.414000**2 = 1.99...\n2 + 5 = 7\nTotal time taken = 2.005274 seconds.\n>>> (1.999, 7.0)\n```\n\nNote that the total time is the longest of the two requests we made.\nThis illustrates the benefit of asynchronous (parallel) behavior:\nthe time for the computation is the time of the longest part, rather than the\nsum of all the parts.\n\n### Asynchronous Servers\n\nNeeds to be written.\n" }, { "alpha_fraction": 0.5940054655075073, "alphanum_fraction": 0.6021798253059387, "avg_line_length": 20.58823585510254, "blob_id": "b8b3c0fd5983f2e2d43c3e801804b0e5c545b855", "content_id": "c703a5d7afc3476eca87046be03936329ea0f8fd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 367, "license_type": "no_license", "max_line_length": 49, "num_lines": 17, "path": "/additionserver.py", "repo_name": "joshmutus/pylabrad-wiki", "src_encoding": "UTF-8", "text": "import time\n\nfrom labrad.server import LabradServer, setting\n\nclass AdditionServer(LabradServer):\n    name = \"Addition Server\"\n\n    @setting(10, a='v[]', b='v[]', returns='v[]')\n    def add(self, c, a, b):\n        time.sleep(1)\n        return a + b\n\n__server__ = AdditionServer()\n\nif __name__ == '__main__':\n    from labrad import util\n    util.runServer(__server__)\n" } ]
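The HOWTO above leaves its "Asynchronous Servers" section unwritten. As a hedged sketch (an assumption, not recovered wiki content), an asynchronous Squaring Server could use the yield/returnValue style shown in Writing-Servers.md, replacing `time.sleep` with a Deferred so the 2 second delay no longer blocks other requests:

```python
# Sketch: a non-blocking variant of squaringserver.py. deferLater hands
# control back to the Twisted reactor during the delay, so the server can
# process other requests concurrently instead of sleeping.
from labrad.server import LabradServer, setting
from twisted.internet import reactor
from twisted.internet.defer import returnValue
from twisted.internet.task import deferLater

class AsyncSquaringServer(LabradServer):
    name = "Async Squaring Server"

    @setting(10, data='v[]', returns='v[]')
    def square(self, c, data):
        # wait 2 seconds without blocking the event loop
        yield deferLater(reactor, 2, lambda: None)
        returnValue(data**2)

__server__ = AsyncSquaringServer()

if __name__ == '__main__':
    from labrad import util
    util.runServer(__server__)
```

With two clients calling `square` at once, both answers come back after roughly 2 seconds total rather than 4 — the server-side counterpart of the `wait=False` trick shown for clients.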
17
PrayasJ/GodotMultiplayerBase
https://github.com/PrayasJ/GodotMultiplayerBase
80144621a6983d8eff7126c12feaa09735bd4aba
f3dca7de5be6375e1c4ff89256e9a2bda3a8fe43
d52f1ac9ebc8bd1913f4c763ac0521914ed125c2
refs/heads/main
2023-02-04T21:54:27.506158
2020-12-26T13:11:23
2020-12-26T13:11:23
324,527,893
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.6276646852493286, "alphanum_fraction": 0.6409680843353271, "avg_line_length": 30.515151977539062, "blob_id": "289498f2719391d62bd05f22fec1e666757522ee", "content_id": "cf907ecff365028f6b289df3a8f049b0067e4e98", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6239, "license_type": "permissive", "max_line_length": 95, "num_lines": 198, "path": "/server/server.py", "repo_name": "PrayasJ/GodotMultiplayerBase", "src_encoding": "UTF-8", "text": "from flask import Flask, render_template, session, request, copy_current_request_context\nfrom flask_socketio import SocketIO, emit, join_room, leave_room, close_room, rooms, disconnect\nimport atexit\nimport json\nimport os\nimport platform\nimport shutil\nimport subprocess\nimport tempfile\nimport time\nimport zipfile\nfrom pathlib import Path\nfrom threading import Timer, Lock\nimport requests\n\nimport sys\n\nfrom PyQt5.QtWidgets import QApplication\nfrom PyQt5.QtWidgets import QLabel\nfrom PyQt5.QtWidgets import QWidget\n\ndef _get_command():\n    system = platform.system()\n    if system == \"Darwin\":\n        command = \"ngrok\"\n    elif system == \"Windows\":\n        command = \"ngrok.exe\"\n    elif system == \"Linux\":\n        command = \"ngrok\"\n    else:\n        raise Exception(\"{system} is not supported\".format(system=system))\n    return command\n\ndef _run_ngrok(port):\n    command = _get_command()\n    ngrok_path = str(Path(tempfile.gettempdir(), \"ngrok\"))\n    _download_ngrok(ngrok_path)\n    executable = str(Path(ngrok_path, command))\n    os.chmod(executable, 0o777)\n    ngrok = subprocess.Popen([executable, 'http', str(port)])\n    atexit.register(ngrok.terminate)\n    localhost_url = \"http://localhost:4040/api/tunnels\" # Url with tunnel details\n    time.sleep(1)\n    tunnel_url = requests.get(localhost_url).text # Get the tunnel information\n    j = json.loads(tunnel_url)\n    tunnel_url = j['tunnels'][0]['public_url'] # Do the parsing of the get\n    tunnel_url = tunnel_url.replace(\"https\", \"http\")\n    return tunnel_url\n\ndef _download_ngrok(ngrok_path):\n    if Path(ngrok_path).exists():\n        return\n    system = platform.system()\n    if system == \"Darwin\":\n        url = \"https://bin.equinox.io/c/4VmDzA7iaHb/ngrok-stable-darwin-amd64.zip\"\n    elif system == \"Windows\":\n        url = \"https://bin.equinox.io/c/4VmDzA7iaHb/ngrok-stable-windows-amd64.zip\"\n    elif system == \"Linux\":\n        url = \"https://bin.equinox.io/c/4VmDzA7iaHb/ngrok-stable-linux-amd64.zip\"\n    else:\n        raise Exception(f\"{system} is not supported\")\n    download_path = _download_file(url)\n    with zipfile.ZipFile(download_path, \"r\") as zip_ref:\n        zip_ref.extractall(ngrok_path)\n\ndef _download_file(url):\n    local_filename = url.split('/')[-1]\n    r = requests.get(url, stream=True)\n    download_path = str(Path(tempfile.gettempdir(), local_filename))\n    with open(download_path, 'wb') as f:\n        shutil.copyfileobj(r.raw, f)\n    return download_path\n\ndef start_ngrok(port):\n    ngrok_address = _run_ngrok(port)\n    # Print the tunnel info before entering the Qt event loop: sys.exit below\n    # never returns, so anything placed after it would be unreachable.\n    print(f\" * Running on {ngrok_address}\")\n    print(f\" * Traffic stats available on http://127.0.0.1:4040\")\n\n    qtapp = QApplication(sys.argv)\n    window = QWidget()\n    window.setWindowTitle('Dummy Multiplayer')\n    window.setGeometry(100, 100, 280, 80)\n    window.move(60, 15)\n    helloMsg = QLabel('<h1>'+ngrok_address.split(\"/\")[-1].split(\".\")[0]+'</h1>', parent=window)\n    helloMsg.move(60, 15)\n\n    window.show()\n    sys.exit(qtapp.exec_())\n\ndef run_with_ngrok(app):\n    old_run = app.run\n    def new_run(*args, **kwargs):\n        port = kwargs.get('port', 5000)\n        thread = Timer(1, 
start_ngrok, args=(port,))\n thread.setDaemon(True)\n thread.start()\n old_run(*args, **kwargs)\n app.run = new_run\n\napp = Flask(__name__)\nrun_with_ngrok(app)\n\nasync_mode = None\napp.config['SECRET_KEY'] = 'secret!'\nsocketio = SocketIO(app, async_mode=async_mode)\nthread = None\nthread_lock = Lock()\n\ndef background_thread():\n \"\"\"Example of how to send server generated events to clients.\"\"\"\n count = 0\n while True:\n socketio.sleep(10)\n count += 1\n socketio.emit('my_response',\n {'data': 'Server generated event', 'count': count})\n\[email protected]('/')\ndef index():\n return render_template('index.html', async_mode=socketio.async_mode)\n\[email protected]\ndef my_event(message):\n session['receive_count'] = session.get('receive_count', 0) + 1\n emit('my_response',\n {'data': message['data'], 'count': session['receive_count']})\n\[email protected]\ndef my_broadcast_event(message):\n session['receive_count'] = session.get('receive_count', 0) + 1\n emit('my_response',\n {'data': message['data'], 'count': session['receive_count']},\n broadcast=True)\n\[email protected]\ndef join(message):\n join_room(message['room'])\n session['receive_count'] = session.get('receive_count', 0) + 1\n emit('my_response',\n {'data': 'In rooms: ' + ', '.join(rooms()),\n 'count': session['receive_count']})\n\[email protected]\ndef leave(message):\n leave_room(message['room'])\n session['receive_count'] = session.get('receive_count', 0) + 1\n emit('my_response',\n {'data': 'In rooms: ' + ', '.join(rooms()),\n 'count': session['receive_count']})\n\[email protected]('close_room')\ndef on_close_room(message):\n session['receive_count'] = session.get('receive_count', 0) + 1\n emit('my_response', {'data': 'Room ' + message['room'] + ' is closing.',\n 'count': session['receive_count']},\n to=message['room'])\n close_room(message['room'])\n\[email protected]\ndef my_room_event(message):\n session['receive_count'] = session.get('receive_count', 0) + 1\n emit('my_response',\n {'data': message['data'], 'count': session['receive_count']},\n to=message['room'])\n\[email protected]\ndef disconnect_request():\n @copy_current_request_context\n def can_disconnect():\n disconnect()\n\n session['receive_count'] = session.get('receive_count', 0) + 1\n # for this emit we use a callback function\n # when the callback function is invoked we know that the message has been\n # received and it is safe to disconnect\n emit('my_response',\n {'data': 'Disconnected!', 'count': session['receive_count']},\n callback=can_disconnect)\n\[email protected]\ndef my_ping():\n emit('my_pong')\n\[email protected]\ndef connect():\n global thread\n with thread_lock:\n if thread is None:\n thread = socketio.start_background_task(background_thread)\n emit('my_response', {'data': 'Connected', 'count': 0})\n\[email protected]('disconnect')\ndef test_disconnect():\n print('Client disconnected', request.sid)\n\nif __name__ == '__main__':\n socketio.run(app)" } ]
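server.py above exposes standard Socket.IO events (`my_event`, `join`, `my_room_event`, ...) over the ngrok tunnel. A minimal sketch of a matching client, assuming the third-party `python-socketio` package (an assumption — it is not part of this repo; any Socket.IO client would do):

```python
# Sketch: talk to the Flask-SocketIO server above from Python.
import socketio

sio = socketio.Client()

@sio.on('my_response')
def on_my_response(data):
    # the server attaches a per-session counter to every response
    print('count=%s data=%s' % (data.get('count'), data.get('data')))

sio.connect('http://localhost:5000')     # or the ngrok URL the server prints
sio.emit('my_event', {'data': 'hello'})  # echoed back as 'my_response'
sio.emit('join', {'room': 'lobby'})      # join a room on the server
sio.sleep(2)                             # let background events arrive
sio.disconnect()
```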
1
Sacha24/Billboard
https://github.com/Sacha24/Billboard
5b70bc8d390f860d476e0226d9061a7666d96154
327817a5f0aa8bc9b3c2256ebc467ac1d4df354a
573808a150af70b936bd206e32ecdda57c263751
refs/heads/master
2022-12-10T23:20:34.738064
2019-01-12T18:44:47
2019-01-12T18:44:47
164,860,649
0
0
null
2019-01-09T12:40:56
2019-01-12T19:00:04
2022-09-23T22:16:33
Python
[ { "alpha_fraction": 0.49193549156188965, "alphanum_fraction": 0.694556474685669, "avg_line_length": 15.813559532165527, "blob_id": "6a38cea3d64900fc80c1f2a2c2dd295d491e337a", "content_id": "a5fd3c8350b7483ec54f35ea4518cf36b0752915", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 992, "license_type": "no_license", "max_line_length": 30, "num_lines": 59, "path": "/requirements.txt", "repo_name": "Sacha24/Billboard", "src_encoding": "UTF-8", "text": "aspy.yaml==1.1.1\nastroid==2.1.0\nbeautifulsoup4==4.6.3\nbottle==0.12.13\ncached-property==1.5.1\ncertifi==2018.11.29\ncfgv==1.1.0\nchardet==3.0.4\nClick==7.0\ndj-database-url==0.5.0\nDjango==1.11\ndjango-heroku==0.3.1\nfeedparser==5.2.1\nfilelock==3.0.10\nFlask==1.0.2\ngoogle==2.0.1\ngunicorn==19.9.0\nheroku==0.1.4\nidentify==1.1.7\nidna==2.8\nimportlib-metadata==0.7\nimportlib-resources==1.0.2\nisort==4.3.4\nitsdangerous==1.1.0\nJinja2==2.10\njmespath==0.9.3\nlazy-object-proxy==1.3.1\nlxml==4.2.5\nMarkupSafe==1.1.0\nmccabe==0.6.1\nMyApplication==0.1.0\nmysql-connector-python==8.0.13\nnodeenv==1.3.3\npluggy==0.8.0\npre-commit==1.13.0\nprotobuf==3.6.1\npsycopg2==2.7.6.1\npy==1.7.0\npylint==2.2.2\nPyMySQL==0.8.0\npython-dateutil==1.5\npytz==2018.7\nPyYAML==3.13\nrequests==2.21.0\nshortcuts==0.10.0\nsix==1.12.0\nslackclient==1.3.0\ntemplate==0.4.2\ntoml==0.10.0\ntox==3.6.1\ntranslate==3.5.0\ntyped-ast==1.1.1\nurllib3==1.24.1\nutils==0.9.0\nvirtualenv==16.1.0\nwebsocket-client==0.54.0\nWerkzeug==0.14.1\nwhitenoise==4.1.2\nwrapt==1.10.11\n" }, { "alpha_fraction": 0.6507353186607361, "alphanum_fraction": 0.6764705777168274, "avg_line_length": 29.22222137451172, "blob_id": "b1669094c3d5232b5372b97a4525c825b1f930f7", "content_id": "f5392ba3e248ed4f115030a43927cdd0d5572fda", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 272, "license_type": "no_license", "max_line_length": 72, "num_lines": 9, "path": "/Billboard/models.py", "repo_name": "Sacha24/Billboard", "src_encoding": "UTF-8", "text": "from django.db import models\n\n\nclass Model(models.Model):\n title = models.CharField(max_length=100, null=False)\n message = models.CharField(max_length=3000, null=False)\n\n def __str__(self):\n return \"title: {}, message: {}\".format(self.title, self.message)\n" }, { "alpha_fraction": 0.6816220879554749, "alphanum_fraction": 0.6816220879554749, "avg_line_length": 33.088233947753906, "blob_id": "3cbf0b5cdf9ad5f185514dfe3fb0c48cda2639ba", "content_id": "9b6094d8101a1c8a7e5a06167eedcfaa8fc1a03e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1159, "license_type": "no_license", "max_line_length": 106, "num_lines": 34, "path": "/Billboard/views.py", "repo_name": "Sacha24/Billboard", "src_encoding": "UTF-8", "text": "from django.shortcuts import render\nfrom django.contrib.auth import login\nfrom django.contrib.auth.forms import UserCreationForm\nfrom django.contrib.auth.decorators import login_required\nfrom django.views.decorators.csrf import csrf_exempt\nfrom .models import Model\nimport datetime\n\n\ndef register(request):\n if request.method == 'POST':\n form = UserCreationForm(request.POST)\n if form.is_valid():\n new_user = form.save()\n login(request, new_user)\n return render(request, \"billboard/index.html\")\n else:\n form = UserCreationForm()\n return render(request, \"registration/register.html\", {\"form\": form})\n\n\n@login_required\n@csrf_exempt\ndef index(request):\n result 
= Model.objects.all()\n date = datetime.date.today()\n if request.user.is_authenticated:\n username = request.user.username\n if request.method == 'POST':\n title = request.POST.get('title')\n message = request.POST.get('message')\n model = Model(title=title, message=message)\n model.save()\n return render(request, 'billboard/index.html', {'result': result, 'date': date, 'username': username})\n" } ]
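views.py above defines `register` and `index`, but this snapshot shows no URL configuration. A hypothetical wiring in the Django 1.11 style matching requirements.txt (the URL names and the auth include are assumptions, not recovered files):

```python
# Hypothetical urls.py for the views above.
from django.conf.urls import url, include

from Billboard import views

urlpatterns = [
    url(r'^$', views.index, name='index'),
    url(r'^register/$', views.register, name='register'),
    # provides /accounts/login/, the default redirect target of @login_required
    url(r'^accounts/', include('django.contrib.auth.urls')),
]
```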
3
abdulshafiq2002/Abdul_GuiPythonGame
https://github.com/abdulshafiq2002/Abdul_GuiPythonGame
33571b412d64a4fbbf331eed26fe7638c504405e
87196351899a5900f77d10c458b16f02582ae186
c8e6ddb6ca1ff788366aa7ed2d12fcb56d515b4f
refs/heads/master
2022-03-20T04:21:47.065675
2019-12-13T12:30:17
2019-12-13T12:30:17
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6006006002426147, "alphanum_fraction": 0.6209781169891357, "avg_line_length": 33.27206039428711, "blob_id": "3534dc8a4654917e7e7f5a5226f1762c30481f8d", "content_id": "bf499fdb8afbad6fb14138d29d83b2355ed462db", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4662, "license_type": "no_license", "max_line_length": 89, "num_lines": 136, "path": "/PythonGUIGame.py", "repo_name": "abdulshafiq2002/Abdul_GuiPythonGame", "src_encoding": "UTF-8", "text": "from guizero import *\nfrom random import *\n\n\nname =\"\";\n\nLogIn = App(title=\"Log In\", layout=\"grid\",width=400, height=415)\nLBUsernames = ListBox(LogIn,grid=[0,1],width = 30,height = 20)\ncya = Text(LogIn,text = \"Choose Your \\n Account:\",grid = [0,0])\nTXTPassword= Text(LogIn,text=\"Password:\",grid=[1,0])\nTBPassword =TextBox(LogIn,grid=[1,1],width = 25)\n\n\n\nNewUserF = App(title=\"NewUser\", width=300, height=175, layout=\"grid\")\nNewUserF.hide()\nnewusertext = Text(NewUserF,text=\"Username:\",grid=[0,1])\nnewpasstext = Text(NewUserF,text=\"Password:\",grid=[0,2])\nnewsecoundpasstext = Text(NewUserF,text=\"Confirm Password:\",grid=[0,3])\nTBnewusername =TextBox(NewUserF,width=25,grid=[1,1])\nTBnewpassword =TextBox(NewUserF,width=25,grid=[1,2])\nTBnewcpassowrd =TextBox(NewUserF,width=25,grid=[1,3])\n\nGameF = App(title=name, width=300, height=175, layout=\"grid\")\nScore = Text(GameF,text=\"Score = 0\",grid=[3,0])\nGameF.hide()\n\nMsgbox = App(title = \"Message\",height = 75)\nmsg = Text(Msgbox,text = \"\",grid = [0,0])\nMsgbox.hide()\n\ndef MsgBoxClose():\n Msgbox.hide()\n\ndef MessageBox(message):\n msg.value = message\n Msgbox.show()\ndef GetUsernames():\n f = open(\"Users.txt\",\"r\")\n for line in f:\n arr = line.split(\"/\")\n LBUsernames.append(arr[0])\n f.close()\n\ndef CheckUser():\n with open(\"Users.txt\",mode=\"r\") as f:\n for line in f:\n if(line == LBUsernames.value+\"/\"+TBPassword.value):\n GameF.show();\n GameF.title = LBUsernames.value\n LogIn.hide()\n return\n MessageBox(\"Invalid Username or Password\")\n\ndef MakeNewUser():\n if(TBnewusername.value!=\"\"and TBnewpassword.value !=\"\" and TBnewcpassowrd.value!=\"\"):\n if(TBnewpassword.value==TBnewcpassowrd.value):\n with open(\"Users.txt\",mode=\"r\") as f:\n for line in f:\n arr = line.split(\"/\")\n if(arr[0]==TBnewusername.value):\n MessageBox(\"User already exists , please choose another one\")\n return\n with open(\"Users.txt\",mode=\"a+\") as f:\n f.writelines(\"\\n\"+TBnewusername.value+\"/\"+TBnewpassword.value)\n NewUserF.hide()\n LogIn.show()\n MessageBox(\"Account Created\")\n TBnewusername.value=\"\"\n TBnewcpassowrd.value = \"\"\n TBnewpassword.value = \"\"\n LBUsernames.clear()\n GetUsernames()\n else:\n MessageBox(\"Passwords dont match\")\n else:\n MessageBox(\"Please dont leave values empty\")\ndef NewUser():\n LogIn.hide()\n NewUserF.show()\ndef BackToLogIN():\n LogIn.show();\n NewUserF.hide()\n TBnewusername.value=\"\"\n TBnewcpassowrd.value = \"\"\n TBnewpassword.value = \"\"\ndef ChooseRock():\n Play(0)\ndef ChoosePaper():\n Play(1)\ndef ChooseScissors():\n Play(2)\n\ndef Play(PlayerChoice):\n choice = randint(0,2)\n print(choice)\n strp = Choice(PlayerChoice)\n strc = Choice(choice)\n if(choice == PlayerChoice):\n MessageBox(\"You chose \"+strp+\" Computer chose \"+strc+\" \\n So draw\")\n elif(choice ==0 and PlayerChoice==1):\n MessageBox(\"You chose \"+strp+\" Computer chose \"+strc+\" \\n So you win\")\n elif(choice == 0 and PlayerChoice == 2):\n 
MessageBox(\"You chose \"+strp+\" Computer chose \"+strc+\" \\n So you lose\")\n    elif(choice ==1 and PlayerChoice == 0):\n        MessageBox(\"You chose \"+strp+\" Computer chose \"+strc+\" \\n So you lose\")\n    elif(choice ==1 and PlayerChoice == 2):\n        MessageBox(\"You chose \"+strp+\" Computer chose \"+strc+\" \\n So you win\")\n    elif(choice == 2 and PlayerChoice == 0):\n        MessageBox(\"You chose \"+strp+\" Computer chose \"+strc+\" \\n So you win\")\n    elif(choice ==2 and PlayerChoice ==1):\n        MessageBox(\"You chose \"+strp+\" Computer chose \"+strc+\" \\n So you lose\")\n\ndef Choice(num):\n    if num == 0:\n        return \"Rock\"\n    elif num ==1:\n        return \"Paper\"\n    elif num ==2:\n        return \"Scissors\"\n\nBTNClose = PushButton(Msgbox,command = MsgBoxClose,text=\"Ok\",grid=[0,1])\nBTNConfirm = PushButton(LogIn,command = CheckUser,text=\"Confirm\",grid=[1,4])\nBTNNewUser = PushButton(LogIn,command = NewUser,text=\"New User?\",grid = [0,4])\nBTNnuConfirm = PushButton(NewUserF,command = MakeNewUser,text=\"Confirm\",grid=[1,4])\nBTNBack = PushButton(NewUserF,command = BackToLogIN,text=\"Back To \\n Log In\",grid=[0,5])\nBTNRock = PushButton(GameF,command = ChooseRock ,text=\"Rock\",grid=[0,0])\nBTNPaper = PushButton(GameF,command = ChoosePaper,text=\"Paper\",grid=[0,1])\nBTNScissors = PushButton(GameF,command = ChooseScissors,text=\"Scissors\",grid=[0,2])\n\n\nGetUsernames()\n\nGameF.display()\n\nLogIn.display()\n\n" } ]
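GetUsernames, CheckUser and MakeNewUser above all assume a plain-text `Users.txt` next to the script, one account per line in the form `username/password`. A hypothetical example file:

```
admin/admin123
abdul/secret
```

Note that CheckUser compares whole lines as read from the file, so a line only matches while it carries no trailing newline; with the `"\n" + username + "/" + password` append used in MakeNewUser, that is the file's last line.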
1
manusmansus/Satelite
https://github.com/manusmansus/Satelite
6dae66fed6c3c84e883139f0761eba886acac55a
bccc01961234a32f3e866a4e37ed6596b2b3a231
0ed3e90f7befd02ecdd06296eddbae309e362b7a
refs/heads/master
2020-04-17T18:53:41.392080
2016-09-02T15:12:46
2016-09-02T15:12:46
66,846,417
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5685279369354248, "alphanum_fraction": 0.5820642709732056, "avg_line_length": 35.9375, "blob_id": "33a8542a314a44162dcff996eefdce03cdcf322b", "content_id": "abb6402cd76bb812b8429c95e38c7527681ba002", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1773, "license_type": "no_license", "max_line_length": 125, "num_lines": 48, "path": "/satellite_main.py", "repo_name": "manusmansus/Satelite", "src_encoding": "UTF-8", "text": "import sys\nsys.dont_write_bytecode = True\nfrom visual import *\nimport get_info\nimport sat_functions\n\ndef main():\n sat_info_dict = get_info.get_info()\n\n satelite, time, sat_axis_x, sat_axis_y, sat_axis_z = sat_functions.create_and_draw_objects()\n \n info_entry = 0\n while info_entry < len(sat_info_dict):\n \n sleep(0.001)\n info_entry += 1\n \n\n satelite.pos = (sat_info_dict[\"Info Entry \"+str(info_entry)][\"sat_x_pos\"],\n sat_info_dict[\"Info Entry \"+str(info_entry)][\"sat_y_pos\"],\n sat_info_dict[\"Info Entry \"+str(info_entry)][\"sat_z_pos\"])\n \n sat_axis_x.pos = satelite.pos\n sat_axis_y.pos = satelite.pos\n sat_axis_z.pos = satelite.pos\n \n quaternion = [sat_info_dict[\"Info Entry \"+str(info_entry)][\"quaternion_w\"],\n sat_info_dict[\"Info Entry \"+str(info_entry)][\"quaternion_i\"],\n sat_info_dict[\"Info Entry \"+str(info_entry)][\"quaternion_j\"],\n sat_info_dict[\"Info Entry \"+str(info_entry)][\"quaternion_k\"]]\n \n declination = sat_info_dict[\"Info Entry \"+str(info_entry)][\"declination\"]\n right_ascension = sat_info_dict[\"Info Entry \"+str(info_entry)][\"right_ascension\"]\n \n sat_axis_xyz = sat_functions.calc_sat_axis(quaternion, declination, right_ascension)\n\n sat_axis_x.axis= sat_axis_xyz[0]*20000\n sat_axis_y.axis= sat_axis_xyz[1]*20000\n sat_axis_z.axis= sat_axis_xyz[2]*20000\n \n day, hours, minutes = sat_functions.convert_julian_to_real_time(sat_info_dict[\"Info Entry \"+str(info_entry)][\"time\"])\n time.text =(\"Time: \"+str(day)+\" \"+str(hours)+\":\"+str(minutes))\n\n\n sat_functions.end_loop(satelite) \n\n\nmain()\n" }, { "alpha_fraction": 0.5623108148574829, "alphanum_fraction": 0.6220988631248474, "avg_line_length": 33.77193069458008, "blob_id": "dd87cf693aff1d7d758d4d347318b21c2decc1bc", "content_id": "866efaa1be987892ce96e6b027932c68f68cc132", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3964, "license_type": "no_license", "max_line_length": 116, "num_lines": 114, "path": "/sat_functions.py", "repo_name": "manusmansus/Satelite", "src_encoding": "UTF-8", "text": "from visual import *\nimport math\n\ndef create_and_draw_objects():\n \"\"\"creates and draws the vpython window, time text, earth and satelite\"\"\"\n \n scene = display(width = 800, height = 800, forward=(-1,0,0), up=(0,0,1))\n \n draw_lights()\n\n time= label(pos=(0,9000,17000), text='Time: ')\n\n earth = sphere(pos=(0,0,0), radius= 6371, material = materials.earth,\n up=(0,0,1))\n\n satelite = box(pos=(0,0,0), length = 100, width=100, height=100,\n color=color.red, make_trail=False)\n\n sat_axis_x = arrow(pos=(0,0,0), axis=(1,0,0), shaftwidth=100, make_trail=False, color=color.green)\n sat_axis_y = arrow(pos=(0,0,0), axis=(0,1,0), shaftwidth=100, make_trail=False, color=color.red)\n sat_axis_z = arrow(pos=(0,0,0), axis=(0,0,1), shaftwidth=100, make_trail=False, color=color.blue)\n\n return satelite, time, sat_axis_x, sat_axis_y, sat_axis_z\n\n\ndef draw_lights():\n \"\"\"lights the earth so it isn't too dark\"\"\"\n 
\n lamp1 = local_light(pos=(0,-50000,0), color=color.white)\n lamp2 = local_light(pos=(0,50000,0), color=color.white)\n lamp3 = local_light(pos=(-50000,0,0), color=color.white)\n lamp4 = local_light(pos=(50000,0,0), color=color.white)\n\ndef calc_sat_axis(quaternion, declination, right_ascension):\n \"\"\"calculates the axis of the satelite using their specific functions\"\"\"\n \n z_axis = calc_z_axis(declination, right_ascension, quaternion)\n\n temp_x_axis = vector_rotation_by_quaternion([1,0,0],quaternion)\n x_axis = calc_x_axis(temp_x_axis, z_axis)\n \n y_axis= cross(x_axis, z_axis)\n\n return [x_axis, y_axis, z_axis]\n\ndef quaternion_mult(q,r):\n \"\"\"does the hamilton product calculation\"\"\"\n \n return [r[0]*q[0]-r[1]*q[1]-r[2]*q[2]-r[3]*q[3],\n r[0]*q[1]+r[1]*q[0]-r[2]*q[3]+r[3]*q[2],\n r[0]*q[2]+r[1]*q[3]+r[2]*q[0]-r[3]*q[1],\n r[0]*q[3]-r[1]*q[2]+r[2]*q[1]+r[3]*q[0]]\n\ndef vector_rotation_by_quaternion(v,q):\n \"\"\"receives a vector and a quaternion as arguments and returns the resulting vector from the hamilton product\"\"\"\n \n r = [0]+v\n q_conj = [q[0],-1*q[1],-1*q[2],-1*q[3]]\n vector_as_list = quaternion_mult(quaternion_mult(q,r),q_conj)[1:]\n end_vector = vector(vector_as_list[0], vector_as_list[1], vector_as_list[2])\n\n return end_vector\n\ndef calc_x_axis(temp_x_axis, z_axis):\n \"\"\"calcs the angle difference between the z and the x_axis and then changes\n the angle to 90 degrees to make them orthogonal\"\"\"\n \n start_angle = math.degrees((diff_angle(temp_x_axis,z_axis)))\n\n angle_change = 90.0 - start_angle\n\n x_axis = rotate(temp_x_axis, angle=radians(angle_change), axis=cross(z_axis,temp_x_axis))\n \n return x_axis\n \ndef calc_z_axis(declination, right_ascension, quaternion):\n \"\"\"calcs the z_axis using declination and right_ascension\"\"\"\n \n declination_vector = rotate((1,0,0), angle=declination, axis=(0,-1,0))\n right_ascension_vector = rotate((1,0,0), angle=right_ascension, axis=(0,0,1))\n z_axis = norm(declination_vector + right_ascension_vector)\n\n return z_axis\n\ndef convert_julian_to_real_time(julian_time):\n \"\"\"converts a julian day starting from 1/1/2000 to normal time\"\"\"\n \n julian_day = int(julian_time)\n if julian_day == 6896:\n day = \"18 Nov 2018\"\n elif julian_day == 6897:\n day = \"19 Nov 2018\"\n elif julian_day == 6898:\n day = \"20 Nov 2018\"\n elif julian_day == 6899:\n day = \"21 Nov 2018\"\n elif julian_day == 6900:\n day = \"22 Nov 2018\"\n \n decimal = julian_time - int(julian_time)\n seconds_in_a_day = 86400\n total_seconds = int(decimal * seconds_in_a_day)\n\n hours, minutes = divmod(total_seconds, 3600)\n minutes = int(math.ceil(float(minutes)/60))\n\n return day, hours, minutes\n\ndef end_loop(satelite):\n \"\"\"if there is no more positional info the satelite stops and turns green\"\"\"\n \n while True:\n sleep(0.01)\n satelite.color = color.green\n" }, { "alpha_fraction": 0.4074585735797882, "alphanum_fraction": 0.43370166420936584, "avg_line_length": 38.135135650634766, "blob_id": "a30f103735e09bea2c6599eefad1003882d4c4f8", "content_id": "838d2096be8fec3160ae6da540cf3ee817b273cf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1448, "license_type": "no_license", "max_line_length": 106, "num_lines": 37, "path": "/get_info.py", "repo_name": "manusmansus/Satelite", "src_encoding": "UTF-8", "text": "import os\n\ndef get_info():\n with open(os.path.expanduser(\"~/Desktop/satellite/satellite_data.txt\")) as textfile:\n data = 
textfile.read().replace('\n', '').split(\" \")\n\n    return create_sat_info_dict(data)\n\n\ndef create_sat_info_dict(lst):\n\n    sat_info_dict={}\n    info_entry=0\n    \n    while info_entry<5799:\n        \n        info_entry +=1\n        \n        index = lst.index(\"HD\")\n        temp_list = lst[index:index+14]\n\n        sat_info_dict[\"Info Entry \"+str(info_entry)]={\"star\" : temp_list[0] + temp_list[1] + temp_list[2],\n                                                     \"rotation_period\" : float(temp_list[3]),\n                                                     \"declination\" : float(temp_list[4]),\n                                                     \"right_ascension\" : float(temp_list[5]),\n                                                     \"time\" : float(temp_list[6]),\n                                                     \"sat_x_pos\" : int(float(temp_list[7]))/1000,\n                                                     \"sat_y_pos\" : int(float(temp_list[8]))/1000,\n                                                     \"sat_z_pos\" : int(float(temp_list[9]))/1000,\n                                                     \"quaternion_w\" : float(temp_list[10]),\n                                                     \"quaternion_i\" : float(temp_list[11]),\n                                                     \"quaternion_j\" : float(temp_list[12]),\n                                                     \"quaternion_k\" : float(temp_list[13])}\n\n        lst.pop(index)\n\n    return sat_info_dict\n" }, { "alpha_fraction": 0.7747252583503723, "alphanum_fraction": 0.7910643815994263, "avg_line_length": 168.11111450195312, "blob_id": "8f6563502af3beb8c8bdc558c80a01974c7e36ce", "content_id": "0dfda47ec23fd17ba86f4df13fd14c82ddc26d15", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 3146, "license_type": "no_license", "max_line_length": 690, "num_lines": 18, "path": "/README.md", "repo_name": "manusmansus/Satelite", "src_encoding": "UTF-8", "text": "The goal of this project is to graphically represent, in 3D, the Earth orbit and the axes of the satellite that observes the star HD 7924. All the information needed for this representation is contained in the text file satellite_data.txt. The project was written in Python, with the VPython module used for the 3D rendering.\n\nThe project is divided into 3 code files: satellite_main.py, get_info.py and sat_functions.py.\n\nsatellite_main.py is the file the program is run from. Its main() function first calls get_info() from the file get_info.py. This function reads all 5799 information sets contained in satellite_data.txt and organizes them into a dictionary of dictionaries through create_sat_info_dict(). Each set in the resulting dictionary contains the name of the planet HD 7924 b, the rotation period of HD 7924 b, the declination and right ascension of the star HD 7924, the time as a Julian Day counted from 1/1/2000, the satellite position in km in the J2000 reference frame, and the quaternions that define the rotation of the satellite axes.\n\nAfter obtaining all the necessary information, the program draws, using the VPython module, the display window, the Earth, the satellite, the time (as day/hour/minute) and the satellite axes.\n\nNext, the program enters the main loop in which, following the order of the information sets stored in the sat_info_dict dictionary, the position of the satellite and of its axes is updated. First the satellite position and the position of its axes are read and updated. Then the quaternion, the declination and the right ascension are obtained. These are used as arguments to the calc_sat_axis() function of the file sat_functions.py. This function computes the satellite's X, Y and Z axes.\n\nThe Z axis, which represents the vector pointing at the star HD 7924, is computed first, using the declination vector and the right ascension vector and adding them together.\nThe X axis is computed with the vector_rotation_by_quaternion() function, taking the quaternion and the vector (1,0,0) as arguments. This function uses the Hamilton product to return the vector resulting from the rotation, but due to a possible calculation error this vector is not orthogonal to the Z axis vector, so the calc_x_axis() function has to be called, with the Z axis vector and the rotated vector as arguments; it returns an X vector that is orthogonal to the Z axis vector.\nTo compute the Y axis, VPython's cross() function is used to obtain a vector orthogonal to both the X and the Z axis.\nThe function then returns the three axes, and they are updated.\n\nFinally, the convert_julian_to_real_time() function is called with the time in Julian format as its argument, and returns the time in day/hour/minute format. The time display in the program is then updated.\n\nWhen the program reaches the end of the information provided by the text file satellite_data.txt, the satellite stops orbiting the Earth and its color changes from red to green.\n" } ]
4
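A minimal sketch of the quaternion-rotation technique that sat_functions.py above relies on — pure Python with no VPython dependency; the helper names hamilton() and rotate_vector() are illustrative choices here, not names from the repository:

import math

def hamilton(q, r):
    # Hamilton product of two quaternions given as [w, x, y, z] lists.
    w1, x1, y1, z1 = q
    w2, x2, y2, z2 = r
    return [w1*w2 - x1*x2 - y1*y2 - z1*z2,
            w1*x2 + x1*w2 + y1*z2 - z1*y2,
            w1*y2 - x1*z2 + y1*w2 + z1*x2,
            w1*z2 + x1*y2 - y1*x2 + z1*w2]

def rotate_vector(v, q):
    # Rotate the 3-vector v by the unit quaternion q via q * (0, v) * q_conjugate,
    # the same construction used in vector_rotation_by_quaternion() above.
    q_conj = [q[0], -q[1], -q[2], -q[3]]
    return hamilton(hamilton(q, [0.0] + list(v)), q_conj)[1:]

# Sanity check: a 90-degree rotation about the z-axis maps x onto y.
q90 = [math.cos(math.pi / 4), 0.0, 0.0, math.sin(math.pi / 4)]
print(rotate_vector([1.0, 0.0, 0.0], q90))  # approximately [0.0, 1.0, 0.0]

Conjugating by a unit quaternion is what makes this a rotation: the zero scalar part of (0, v) is preserved, and the vector part comes back rotated by twice the quaternion's half-angle.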
threiten/plotting
https://github.com/threiten/plotting
afa12ee153c84d38a41af672b4205350df3e1590
bfb065104a94f79dabbc155c6336c22d3c9d71f1
56dabb41483c4dd04765b5405486078814e6879f
refs/heads/master
2022-01-20T17:50:33.140374
2022-01-07T12:17:05
2022-01-07T12:17:05
229,024,829
0
2
null
null
null
null
null
[ { "alpha_fraction": 0.49628955125808716, "alphanum_fraction": 0.5026503205299377, "avg_line_length": 30.44285774230957, "blob_id": "8ade72fac531adb3348ce7f3c4d6c452bd45ce7e", "content_id": "7578368f71460694a6776d582b20668efcbeb10b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6603, "license_type": "no_license", "max_line_length": 148, "num_lines": 210, "path": "/plot_base.py", "repo_name": "threiten/plotting", "src_encoding": "UTF-8", "text": "import pickle\nimport gzip\nimport copy\nimport yaml\n\nimport matplotlib.pyplot as plt\nimport matplotlib.cm as cm\n\ncmsTextDic = {\n 'wip': r'\\textbf{CMS} \\textit{Work in Progress}',\n 'prelim': r'\\textbf{CMS} \\textit{Preliminary}',\n 'final': r'\\textbf{CMS}',\n 'sim': r'\\textbf{CMS} \\textit{Simulation}'\n}\n\n\nclass plotBase(object):\n\n def __init__(self, df_mc, var, weightstr_mc, label, type, df_data=None, **kwargs):\n\n self.var = var\n self.weightstr_mc = weightstr_mc\n if label is not None:\n self.title = '{} {}'.format(var, label).replace('_', ' ')\n else:\n self.title = '{}'.format(var).replace('_', ' ')\n self.pl_tpe = type\n\n if 'cut' in kwargs:\n df_mc_read = df_mc.query(kwargs['cut'], engine='python')\n if df_data is not None:\n df_data_read = df_data.query(kwargs['cut'], engine='python')\n self.cut = kwargs['cut']\n else:\n df_mc_read = df_mc\n if df_data is not None:\n df_data_read = df_data\n\n if 'lumi' in kwargs:\n self.lumi = kwargs['lumi']\n\n self.lumiStr = None\n if 'lumiStr' in kwargs:\n self.lumiStr = kwargs['lumiStr']\n if 'lumi' in kwargs:\n if kwargs['lumi'] != kwargs['lumiStr']:\n print(\n 'Warning: lumi and lumi to be written on the plot not the same!')\n\n if 'exts' in kwargs:\n self.mc_vars = [var] + \\\n ['{}{}'.format(var, ext) for ext in kwargs['exts']]\n else:\n self.mc_vars = [var]\n\n if 'num' in kwargs:\n self.title += ' {}'.format(kwargs['num'])\n\n self.cmsText = None\n if 'cmsText' in kwargs:\n self.cmsText = kwargs['cmsText']\n else:\n self.cmsText = None\n\n self.mc = df_mc_read.loc[:, self.mc_vars]\n if df_data is not None:\n self.data = df_data_read.loc[:, [var]].values\n else:\n self.data = None\n\n self.mc_weights = df_mc_read.loc[:, [weightstr_mc]].values\n self.mc_weights_cache = df_mc_read.loc[:, [weightstr_mc]].values\n\n with open('/t3home/threiten/python/plotting/texReplacement.yaml') as f:\n self.tex_replace_dict = yaml.load(f, Loader=yaml.FullLoader)\n f.close()\n\n if 'weightstr_data' in kwargs and df_data is not None:\n self.weightstr_data = kwargs['weightstr_data']\n self.data_weights = df_data_read.loc[:, [\n kwargs['weightstr_data']]].values\n\n if 'xgrid' in kwargs:\n self.xgrid = kwargs['xgrid']\n\n if 'ygrid' in kwargs:\n self.ygrid = kwargs['ygrid']\n\n if 'cut_str' in kwargs:\n self.cut_str = kwargs['cut_str']\n\n if 'leg_loc' in kwargs:\n self.leg_loc = kwargs['leg_loc'].replace('_', ' ')\n\n if 'colors' in kwargs:\n self.colors = kwargs['colors']\n else:\n self.colors = list(cm.Dark2.colors)\n\n self.xloc = None\n if 'xloc' in kwargs:\n self.xloc = kwargs['xloc']\n\n def save(self, out_dir, save_dill=False):\n\n if not hasattr(self, 'fig'):\n raise AttributeError(\"Draw figure before saving it!\")\n\n self.fig.savefig('{}/{}_{}.png'.format(out_dir, self.pl_tpe,\n self.title).replace(' ', \"_\"), bbox_inches='tight')\n self.fig.savefig('{}/{}_{}.pdf'.format(out_dir, self.pl_tpe,\n self.title).replace(' ', \"_\"), bbox_inches='tight')\n if save_dill:\n self.dill_save('{}/{}_{}.pkl'.format(out_dir,\n self.pl_tpe, 
self.title).replace(' ', \"_\"))\n\n def dill_save(self, fname):\n\n pickle.dump(self.fig, gzip.open('{}.gz'.format(fname), mode='wb'))\n\n def set_style(self):\n\n rcP = {'text.usetex': True,\n 'font.family': 'sans-serif',\n 'font.sans-serif': ['Helvetica'],\n 'pdf.fonttype': 42,\n 'axes.labelsize': 20,\n 'font.size': 16,\n 'pgf.rcfonts': True,\n 'text.latex.preamble': r\"\\usepackage{bm, xspace, amsmath}\"}\n # 'text.latex.preview': True}\n plt.rcParams.update(rcP)\n\n @staticmethod\n def parse_repl(repl):\n\n ret = repl.split(':')\n\n if len(ret) == 1:\n ret = ['', ret[0], '']\n elif len(ret) == 2:\n if ret[0] == 'math':\n ret.append('')\n else:\n ret = [''] + ret\n\n if ret[0] == 'math':\n ret[0] = True\n else:\n ret[0] = False\n\n return tuple(ret)\n\n def get_tex_repl(self, st):\n\n lest = len(st)\n stLis = [st[i:j+1] for i in range(lest) for j in range(i, lest)]\n stLis.sort(key=lambda s: len(s))\n unit = ''\n mat = False\n ret = st\n for sti in stLis:\n if sti in self.tex_replace_dict:\n mat, repl, unit = self.parse_repl(self.tex_replace_dict[sti])\n ret = st.replace(sti, repl)\n\n if unit != '':\n ret += r'~\\textnormal{{{0}}}'.format(unit)\n if mat:\n return r'${0}$\\\\'.format(ret)\n elif ret == '':\n return r''\n else:\n return r'{0}\\\\'.format(ret)\n\n def get_tex_cut(self):\n\n c_list = self.cut.replace('and', '').split()\n\n self.cut_str_tex = r''\n for st in c_list:\n self.cut_str_tex += self.get_tex_repl(st)\n\n def normalize_mc(self):\n\n self.mc_weights = copy.deepcopy(self.mc_weights_cache)\n\n if hasattr(self, 'lumi'):\n self.mc_weights *= self.lumi\n else:\n if hasattr(self, 'data_weights'):\n self.mc_weights *= self.data_weights.sum() / self.mc_weights.sum()\n else:\n self.mc_weights *= self.data.shape[0] / self.mc_weights.sum()\n\n @staticmethod\n def drawCMSLogo(ax, opt, fs=22):\n\n ax.text(0, 1, cmsTextDic[opt], fontsize=fs,\n transform=ax.transAxes, va='bottom')\n\n @staticmethod\n def drawIntLumi(ax, intL=138, fs=22):\n\n if intL is None:\n ax.text(1, 1, r'13\\ensuremath{\\,\\text{Te\\hspace{-.08em}V}}\\xspace',\n fontsize=fs, transform=ax.transAxes, ha='right', va='bottom')\n else:\n ax.text(1, 1, r'{}\\mbox{{\\ensuremath{{\\,\\text{{fb}}^{{-1}}}}}}\\xspace (13\\ensuremath{{\\,\\text{{Te\\hspace{{-.08em}}V}}}}\\xspace)'.format(\n intL), fontsize=fs, transform=ax.transAxes, ha='right', va='bottom')\n" }, { "alpha_fraction": 0.4899143576622009, "alphanum_fraction": 0.5045670866966248, "avg_line_length": 41.20883560180664, "blob_id": "6f1e4bca38756734357325697c82205624bf8356", "content_id": "b5d357d809233e9654fab6708d905a47931263b2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 10510, "license_type": "no_license", "max_line_length": 241, "num_lines": 249, "path": "/plot_dmc_hist.py", "repo_name": "threiten/plotting", "src_encoding": "UTF-8", "text": "from plotting.plot_base import plotBase\n\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom matplotlib.offsetbox import AnchoredText\nfrom matplotlib.ticker import FormatStrFormatter\nimport numpy as np\n\n\nclass plot_dmc_hist(plotBase):\n\n def __init__(self, df_mc, var, weightstr_mc, label, type, df_data=None, **kwargs):\n\n super(plot_dmc_hist, self).__init__(df_mc, var,\n weightstr_mc, label, type, df_data=df_data, **kwargs)\n\n self.ratio_lim = kwargs['ratio_lim']\n self.bins = np.linspace(\n kwargs['xmin'], kwargs['xmax'], kwargs['bins']+1)\n\n self.mcHatch = None\n if 'mcHatch' in kwargs:\n self.mcHatch = kwargs['mcHatch']\n\n if ('norm' 
in kwargs and kwargs['norm'] is True) or 'lumi' in kwargs:\n self.normalize_mc()\n\n if 'normToMax' in kwargs:\n self.normToMax = kwargs['normToMax']\n if kwargs['normToMax']:\n maxBMC = []\n for var in self.mc_vars:\n maxBMC.append(np.max(np.histogram(self.mc.loc[:, [var]].values, density=False, bins=self.bins, range=(\n self.bins[0], self.bins[-1]), weights=self.mc_weights)[0]))\n if df_data is not None:\n if hasattr(self, 'data_weights'):\n maxBData = np.max(np.histogram(\n self.data, density=False, bins=self.bins, weights=self.data_weights)[0])\n self.normFactor = max([maxBData] + maxBMC)\n self.mc_weights /= self.normFactor\n self.data_weights /= self.normFactor\n else:\n maxBData = np.max(np.histogram(\n self.data, density=False, bins=self.bins)[0])\n self.normFactor = max([maxBData] + maxBMC)\n self.mc_weights /= self.normFactor\n self.data_weights = np.divide(\n np.ones_like(self.data), self.normFactor)\n else:\n self.normFactor = max(maxBMC)\n self.mc_weights /= self.normFactor\n\n if 'ratio' in kwargs:\n if df_data is not None:\n self.ratio = kwargs['ratio']\n elif df_data is None:\n self.ratio = False\n\n if 'logy' in kwargs:\n self.logy = kwargs['logy']\n\n @staticmethod\n def get_annot_pos(leg_pos, figsize):\n\n ret = [0, 0]\n\n if leg_pos.x0 >= 0.7*figsize[0]:\n lr = 'right'\n ret[0] = 1\n elif leg_pos.x1 <= 0.5*figsize[0]:\n lr = 'left'\n ret[0] = 0\n else:\n lr = 'right'\n ret[0] = 1\n if leg_pos.y0 >= 0.7*figsize[1]:\n ret[1] = -0.3\n tb = 'top'\n elif leg_pos.y1 <= 0.5*figsize[1]:\n ret[1] = 1\n tb = 'bottom'\n else:\n ret[1] = 1\n tb = 'bottom'\n\n return ret, lr, tb\n\n def draw(self):\n\n self.set_style()\n\n if self.ratio:\n fig, axes = plt.subplots(2, figsize=(8, 6), sharex=True, gridspec_kw={\n 'height_ratios': [3, 1]})\n fig.tight_layout()\n plt.subplots_adjust(hspace=0.1)\n top = axes[0]\n bottom = axes[1]\n else:\n fig = plt.figure(figsize=(8, 6))\n axes = None\n top = plt\n\n self.xc = 0.5*(self.bins[1:]+self.bins[:-1])\n self.binw = self.xc[1] - self.xc[0]\n\n self.mc_labels = ['MC_{}'.format(var).replace(\n self.mc_vars[0], '').replace('_', ' ') for var in self.mc_vars]\n\n if self.ratio:\n bottom.grid(linestyle='-.', color='lightslategrey', alpha=0.5)\n\n self.mc_hists = []\n mc_errs = []\n for var in self.mc_vars:\n i = self.mc_vars.index(var)\n hist, _, _ = top.hist(self.mc.loc[:, [var]].values, bins=self.bins, range=(self.bins[0], self.bins[-1]), histtype='step',\n alpha=1, weights=self.mc_weights, label=self.mc_labels[i], color=self.colors[i], linestyle='solid', linewidth=3, hatch=self.mcHatch)\n mc_err, _ = np.histogram(self.mc.loc[:, [var]].values, density=False, bins=self.bins, range=(\n self.bins[0], self.bins[-1]), weights=self.mc_weights**2)\n mc_err = np.sqrt(mc_err)\n mc_errs.append(mc_err)\n self.mc_hists.append(hist)\n\n if self.data is not None:\n if hasattr(self, 'data_weights'):\n self.data_hist, _ = np.histogram(\n self.data, density=False, bins=self.bins, weights=self.data_weights)\n data_err, _ = np.histogram(\n self.data, density=False, bins=self.bins, weights=self.data_weights**2)\n self.data_err = np.sqrt(data_err)\n else:\n self.data_hist, _ = np.histogram(\n self.data, density=False, bins=self.bins)\n self.data_err = np.sqrt(self.data_hist)\n (_, caps, _) = top.errorbar(self.xc, self.data_hist, ls='None', yerr=self.data_err, xerr=np.ones_like(\n self.data_hist)*self.binw*0.5, color='black', label=r'Data', marker='.', markersize=8)\n for cap in caps:\n cap.set_markeredgewidth(0)\n\n if axes is None:\n axes = fig.axes\n top = 
axes[0]\n\n        if hasattr(self, 'xgrid'):\n            top.grid(self.xgrid, axis='x', linestyle='-.',\n                     color='lightslategrey', alpha=0.2)\n\n        if hasattr(self, 'ygrid'):\n            top.grid(self.ygrid, axis='y', linestyle='-.',\n                     color='lightslategrey', alpha=0.2)\n\n        if self.ratio:\n            for i in range(len(self.mc_vars)):\n                with np.errstate(divide='ignore', invalid='ignore'):\n                    rdatamc = np.divide(self.data_hist, self.mc_hists[i])\n                    rdatamc_err = np.divide(\n                        1., self.mc_hists[i]) * np.sqrt(self.data_err**2 + rdatamc**2 * mc_errs[i]**2)\n                (_, caps, _) = bottom.errorbar(self.xc, rdatamc, ls='None', xerr=np.ones_like(\n                    rdatamc)*self.binw*0.5, yerr=rdatamc_err, color=self.colors[i], marker='.', markersize=7)\n                for cap in caps:\n                    cap.set_markeredgewidth(0)\n\n            bottom.plot((self.bins[0], self.bins[-1]), (1, 1), 'k--')\n            bottom.set_ylabel(r'\\textit{Data / MC}', fontsize=13)\n            bottom.set_ylim(self.ratio_lim)\n\n        if hasattr(self, 'logy'):\n            if self.logy:\n                top.set_yscale('log')\n        top.set_xlim(self.bins[0], self.bins[-1])\n\n        if self.var in self.tex_replace_dict:\n            math, var, unit = self.parse_repl(self.tex_replace_dict[self.var])\n            if unit == '':\n                axes[-1].set_xlabel(\n                    r'$\\boldsymbol{{{0}}}$'.format(var, unit), fontsize=20, loc=self.xloc)\n            else:\n                axes[-1].set_xlabel(r'$\\boldsymbol{{{0}}}\\,\\,\\left[\\textnormal{{{1}}}\\right]$'.format(\n                    var, unit), fontsize=20, loc=self.xloc)\n        else:\n            axes[-1].set_xlabel(r'\\textit{{{0}}}'.format(\n                self.var.replace('_', '\\_')), fontsize=20, loc=self.xloc)\n\n        axes[0].set_ylabel(r'\\textit{{{0}}}'.format(\n            'Events / {0:.4f}'.format(self.binw)), fontsize=20)\n        # axes[0].yaxis.set_major_formatter(FormatStrFormatter('%.4g'))\n        if hasattr(self, 'normToMax'):\n            if self.normToMax:\n                axes[0].set_ylabel(r'\\textit{a.u.}', fontsize=20, loc='top')\n\n        # fig.suptitle(self.title, y=0.99)\n        if hasattr(self, 'leg_loc'):\n            top.legend(loc=self.leg_loc, framealpha=0)\n        else:\n            top.legend(loc='best', framealpha=0)\n\n        if self.cmsText is not None:\n            self.drawCMSLogo(top, self.cmsText)\n\n        if self.lumiStr is not None:\n            self.drawIntLumi(top, self.lumiStr)\n\n        if hasattr(self, 'cut_str'):\n            if self.cut_str or isinstance(self.cut_str, str):\n                fig.canvas.draw()\n                figsize = fig.get_size_inches()*fig.dpi\n                pos = top.get_legend().get_window_extent()\n                ann_pos, lr, tb = self.get_annot_pos(pos, figsize)\n                lc = {'left': 'left', 'right': 'right'}\n\n                if isinstance(self.cut_str, str):\n                    cut_str_fig = self.cut_str\n                else:\n                    self.get_tex_cut()\n                    cut_str_fig = self.cut_str_tex\n\n                top.annotate(r'\\begin{{{0}}}{1}\\end{{{0}}}'.format('flush{}'.format(lr), cut_str_fig), tuple(\n                    ann_pos), fontsize=14, xycoords=top.get_legend(), bbox={'boxstyle': 'square', 'alpha': 0, 'fc': 'w', 'pad': 0}, ha=lr, va=tb)\n            # else:\n            #     self.get_tex_cut()\n            #     top.annotate(r'\\begin{{{0}}}{1}\\end{{{0}}}'.format('flush{}'.format(lr), self.cut_str_tex), tuple(ann_pos), fontsize=14, xycoords=top.get_legend(), bbox={'boxstyle': 'square', 'alpha': 0, 'fc': 'w', 'pad': 0}, ha=lr, va=tb)\n\n        # cut_box = AnchoredText('{0}'.format(self.cut_str), loc=2, frameon=False)\n        # top.add_artist(cut_box)\n\n        self.fig = fig\n\n    def addHist(self, arr, weights, label):\n\n        cInd = len(self.mc_hists)+1\n        hist, _, _ = self.fig.axes[0].hist(arr, bins=self.bins, range=(self.bins[0], self.bins[-1]), histtype='step',\n                                           alpha=1, weights=weights, label=label, color=self.colors[cInd], linestyle='solid', linewidth=3)\n        err, _ = np.histogram(arr, density=False, bins=self.bins, range=(\n            self.bins[0], self.bins[-1]), weights=weights**2)\n        err = np.sqrt(err)\n        
self.mc_hists.append(hist)\n\n if self.ratio:\n with np.errstate(divide='ignore', invalid='ignore'):\n rdatamc = np.divide(self.data_hist, hist)\n rdatamc_err = np.divide(\n 1., hist) * np.sqrt(self.data_err**2 + rdatamc**2 * err**2)\n (_, caps, _) = self.fig.axes[1].errorbar(self.xc, rdatamc, ls='None', xerr=np.ones_like(\n rdatamc)*self.binw*0.5, yerr=rdatamc_err, color=self.colors[cInd], marker='.', markersize=7)\n for cap in caps:\n cap.set_markeredgewidth(0)\n\n self.fig.axes[0].legend()\n" }, { "alpha_fraction": 0.5018092393875122, "alphanum_fraction": 0.5292401313781738, "avg_line_length": 48.235633850097656, "blob_id": "c27b62b4860effca7766fffa1edec0506552010f", "content_id": "84e8d0351226e4ce198ad607938a2eec1034aee1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8567, "license_type": "no_license", "max_line_length": 173, "num_lines": 174, "path": "/corr_plots.py", "repo_name": "threiten/plotting", "src_encoding": "UTF-8", "text": "import numpy as np\nimport pandas as pd\nimport matplotlib\nimport yaml\nfrom plotting.plot_base import plotBase\n# matplotlib.use('Agg')\nimport matplotlib.pyplot as plt\n\n\ndef wcorr(arr1, arr2, weights):\n m1 = np.average(arr1, weights=weights)*np.ones_like(arr1)\n m2 = np.average(arr2, weights=weights)*np.ones_like(arr2)\n cov_11 = float((weights*(arr1-m1)**2).sum()/weights.sum())\n cov_22 = float((weights*(arr2-m2)**2).sum()/weights.sum())\n cov_12 = float((weights*(arr1-m1)*(arr2-m2)).sum()/weights.sum())\n return cov_12/np.sqrt(cov_11*cov_22)\n\n\nclass corrMat:\n\n def __init__(self, df_mc, df_data, varrs, varrs_corr, weightst, label=''):\n\n self.label = label\n self.varrs = varrs\n self.varrs_corr = varrs_corr\n\n mc_crl = np.zeros((len(varrs), len(varrs)))\n mc_c_crl = np.zeros((len(varrs_corr), len(varrs_corr)))\n data_crl = np.zeros((len(varrs), len(varrs)))\n\n for i, var1 in enumerate(self.varrs):\n mc_crl[i, :] = np.hstack((np.array([100*wcorr(df_mc[var1].values, df_mc[var2].values, df_mc[weightst])\n for var2 in varrs[:varrs.index(var1)+1]]), np.zeros(len(varrs)-(varrs.index(var1)+1))))\n data_crl[i, :] = np.hstack((np.array([100*wcorr(df_data[var1].values, df_data[var2].values,\n df_data['weight'].values) for var2 in varrs[:varrs.index(var1)+1]]), np.zeros(len(varrs)-(varrs.index(var1)+1))))\n\n for i, var1 in enumerate(self.varrs_corr):\n mc_c_crl[i, :] = np.hstack((np.array([100*wcorr(df_mc[var1].values, df_mc[var2].values, df_mc[weightst])\n for var2 in varrs_corr[:varrs_corr.index(var1)+1]]), np.zeros(len(varrs_corr)-(varrs_corr.index(var1)+1))))\n\n self.mc_crl = mc_crl\n self.mc_c_crl = mc_c_crl\n self.data_crl = data_crl\n\n # self.mc_crl = np.array([[100*wcorr(df_mc[var1].values, df_mc[var2].values,\n # df_mc[weightst]) for var2 in varrs[:varrs.index(var1)+1]] for var1 in varrs])\n # self.mc_c_crl = np.array([[100*wcorr(df_mc[var1].values, df_mc[var2].values,\n # df_mc[weightst]) for var2 in varrs_corr[:varrs_corr.index(var1)+1]] for var1 in varrs_corr])\n\n # self.data_crl = np.array([[100*wcorr(df_data[var1].values, df_data[var2].values,\n # df_data['weight'].values) for var2 in varrs[:varrs.index(var1)+1]] for var1 in varrs])\n\n self.fig_name = []\n\n self.mc_crl_meanabs = np.mean(np.abs(self.mc_crl))\n self.mc_c_crl_meanabs = np.mean(np.abs(self.mc_c_crl))\n self.data_crl_meanabs = np.mean(np.abs(self.data_crl))\n\n self.diff_crl_meanabs = np.mean(\n np.abs(np.array(self.mc_crl)-np.array(self.data_crl)))\n self.diff_c_crl_meanabs = np.mean(\n 
np.abs(np.array(self.mc_c_crl)-np.array(self.data_crl)))\n\n self.plotB = plotBase(df_mc, varrs[0], weightst, label, 'profile')\n with open('/t3home/threiten/python/plotting/texReplacement.yaml') as f:\n self.tex_replace_dict = yaml.load(f, Loader=yaml.FullLoader)\n f.close()\n\n self.texlabel = r''\n c_list = self.label.split()\n for st in c_list:\n repl = self.plotB.get_tex_repl(st)\n if repl[0] != '$' and repl[-3:] != '$\\\\':\n self.texlabel += '$' + repl[:-2] + r'$\\\\'\n else:\n self.texlabel += repl\n\n rcP = {'text.usetex': True,\n 'font.family': 'sans-serif',\n 'font.sans-serif': ['Helvetica'],\n 'pdf.fonttype': 42,\n 'axes.labelsize': 20,\n 'font.size': 16,\n 'pgf.rcfonts': True,\n 'text.latex.preamble': r\"\\usepackage{bm, xspace, amsmath}\"}\n\n plt.rcParams.update(rcP)\n\n def plot_corr_mat(self, key):\n\n self.key = key\n fig1 = plt.figure(figsize=(10, 10))\n ax1 = fig1.add_subplot(111)\n\n plt.set_cmap('bwr')\n\n if key == 'data':\n cax1 = ax1.matshow(self.data_crl, vmin=-100, vmax=100)\n self.plot_numbers(ax1, self.data_crl)\n # plt.title(r'Correlation data ' + self.label.replace('_', ' ') +\n # ' Mean abs: {:.3f}'.format(self.data_crl_meanabs), y = 1.4)\n ax1.text(0.95, 0.95, r'\\begin{{flushright}}$\\boldsymbol{{Data}}$\\\\{{{0}}}\\end{{flushright}}'.format(\n self.texlabel).replace(' ', '\\,\\,'), transform=ax1.transAxes, va='top', ha='right', fontsize=20)\n name = 'data_' + self.label\n elif key == 'mc':\n cax1 = ax1.matshow(self.mc_crl, vmin=-100, vmax=100)\n self.plot_numbers(ax1, self.mc_crl)\n # plt.title(r'Correlation mc ' + self.label.replace('_', ' ') +\n # ' Mean abs: {:.3f}'.format(self.mc_crl_meanabs), y=1.4)\n ax1.text(0.95, 0.95, r'\\begin{{flushright}}$\\boldsymbol{{Simulation}}$\\\\{0}\\end{{flushright}}'.format(\n self.texlabel).replace(' ', '\\,\\,'), transform=ax1.transAxes, va='top', ha='right', fontsize=20)\n name = 'mc_' + self.label\n elif key == 'mcc':\n cax1 = ax1.matshow(self.mc_c_crl, vmin=-100, vmax=100)\n self.plot_numbers(ax1, self.mc_c_crl)\n # plt.title(r'Correlation mc corrected ' + self.label.replace('_',\n # ' ') + ' Mean abs: {:.3f}'.format(self.mc_c_crl_meanabs), y=1.4)\n ax1.text(0.95, 0.95, r'\\begin{{flushright}}$\\boldsymbol{{Simulation corrected}}$\\\\{0}\\end{{flushright}}'.format(\n self.texlabel).replace(' ', '\\,\\,'), transform=ax1.transAxes, va='top', ha='right', fontsize=20)\n name = 'mc_corr_' + self.label\n elif key == 'diff':\n cax1 = ax1.matshow(np.array(self.mc_crl) -\n np.array(self.data_crl), vmin=-15, vmax=15)\n self.plot_numbers(ax1, np.array(self.mc_crl) -\n np.array(self.data_crl))\n # plt.title(r'Correlation difference ' + self.label.replace('_',\n # ' ') + ' Mean abs: {:.3f}'.format(self.diff_crl_meanabs), y=1.4)\n ax1.text(0.95, 0.95, r'\\begin{{flushright}}$\\boldsymbol{{Difference}}$\\\\{0}\\end{{flushright}}'.format(\n self.texlabel).replace(' ', '\\,\\,'), transform=ax1.transAxes, va='top', ha='right', fontsize=20)\n name = 'diff_' + self.label\n elif key == 'diffc':\n cax1 = ax1.matshow(np.array(self.mc_c_crl) -\n np.array(self.data_crl), vmin=-15, vmax=15)\n self.plot_numbers(ax1, np.array(self.mc_c_crl) -\n np.array(self.data_crl))\n # plt.title(r'Correlation difference corrected ' + self.label.replace('_',\n # ' ') + ' Mean abs: {:.3f}'.format(self.diff_c_crl_meanabs), y=1.4)\n ax1.text(0.95, 0.95, r'\\begin{{flushright}}$\\boldsymbol{{Difference corrected}}$\\\\{0}\\end{{flushright}}'.format(\n self.texlabel).replace(' ', '\\,\\,'), transform=ax1.transAxes, va='top', ha='right', fontsize=20)\n name = 
'diff_corr_' + self.label\n\n cbar = fig1.colorbar(cax1)\n cbar.set_label(r'\\textbf{\\textit{Correlation (\\%)}}')\n\n # for i in range(len(self.varrs)):\n # self.varrs[i] = self.varrs[i].replace('probe', '')\n ax1.set_yticks(np.arange(len(self.varrs)))\n ax1.set_xticks(np.arange(len(self.varrs)))\n\n ticklabels = []\n for varr in self.varrs:\n math, var, unit = self.plotB.parse_repl(\n self.tex_replace_dict[varr])\n ticklabels.append(r'$\\boldsymbol{{{0}}}$'.format(var))\n\n ax1.set_xticklabels(ticklabels, rotation='vertical')\n ax1.set_yticklabels(ticklabels)\n\n self.fig_name.append((fig1, name))\n\n def plot_numbers(self, ax, mat):\n for i in range(mat.shape[0]):\n for j in range(mat.shape[1]):\n c = mat[j, i]\n if np.abs(c) >= 1:\n ax.text(i, j, r'${:.0f}$'.format(c), fontdict={\n 'size': 14}, va='center', ha='center')\n\n def save(self, outDir):\n for fig, name in self.fig_name:\n fig.savefig(outDir + '/crl_' + name.replace(' ',\n '_') + '.png', bbox_inches='tight')\n fig.savefig(outDir + '/crl_' + name.replace(' ',\n '_') + '.pdf', bbox_inches='tight')\n" }, { "alpha_fraction": 0.5290862321853638, "alphanum_fraction": 0.5599363446235657, "avg_line_length": 48.68041229248047, "blob_id": "16707dee841d01693e46409b358f524660eb3582", "content_id": "f413cc1ed06dea75260c5b5eb57a9ae14686250e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 14457, "license_type": "no_license", "max_line_length": 402, "num_lines": 291, "path": "/profile_plots.py", "repo_name": "threiten/plotting", "src_encoding": "UTF-8", "text": "import pandas as pd\nimport matplotlib.pyplot as plt\nimport sys\nimport numpy as np\nimport matplotlib\nimport yaml\nfrom plotting.plot_base import plotBase\nmatplotlib.use('Agg')\n\nbinsqpars = {'probeS4': (0, 2, 200001), 'probeR9': (0, 2, 200001), 'probeEtaWidth': (0, .1, 200001), 'probePhiWidth': (0, .5, 200001), 'probeSigmaIeIe': (0, .1, 400001), 'newPhoID': (-0.8, 1, 200001), 'probePt': (0, 200, 2000001), 'probeScEta': (-2.5, 2.5, 500001), 'probePhi': (-3.14, 3.14, 600001), 'rho': (0, 80, 800001), 'probeCovarianceIpIp': (0, 0.002, 200001), 'probeCovarianceIphiIphi': (\n 0, 0.002, 200001), 'probeCovarianceIeIp': (-0.001, 0.001, 400001), 'probeCovarianceIetaIphi': (-0.001, 0.001, 400001), 'probeChIso03worst': (0, 20, 200001), 'probeChIso03': (0, 20, 20001), 'probeChIso03worst': (0, 20, 20001), 'probeEnergy': (0, 400, 400001), 'probePhoIso': (0, 20, 20001), 'probeScPreshowerEnergy': (0, 50, 250001), 'probeSigmaRR': (0, 1, 100001), 'newPhoIDtrIsoZ': (0, 1, 100001)}\n\ndvar_bins = {'probePt': (25, 75, 51), 'probeScEta': {'EB': (-1.4442, 1.4442, 51), 'EE': (\n 1.57, 2.5, 31)}, 'probePhi': (-3.14, 3.14, 61), 'rho': (0, 40, 41), 'run': (297050, 304797, 200)}\n\n\ndef wquantile(q, vals, bins, weights=None):\n centres = 0.5*(bins[1:]+bins[:-1])\n hist, _ = np.histogram(vals, bins=bins, weights=weights)\n cum_hist = np.cumsum(hist, dtype=float)\n cum_hist_n = cum_hist/cum_hist[-1]\n ind_high_bound = np.searchsorted(cum_hist_n, q)\n ind_low_bound = ind_high_bound-1\n inds = np.sort(np.ravel(np.array([ind_low_bound, ind_high_bound])))\n q_vals = np.interp(q, cum_hist_n[inds], centres[inds])\n return q_vals\n\n\ndef wquantile_unb(q, vals, weights):\n df = pd.DataFrame()\n df['weight'] = weights\n df['val'] = vals\n sort_df = df.sort_values('val')\n w_cum = np.cumsum(sort_df['weight'].values)\n cdf = np.vstack(((w_cum/w_cum[-1]), sort_df['val'].values))\n ind = np.searchsorted(cdf[0], q)\n return cdf[1][ind]\n\n\nclass 
profilePlot:\n\n def __init__(self, df_mc, df_data, nq, bintpe, var, diff_var, weightst_mc, EBEE, zoom=False, corrlabel=None, addlabel=None, label='', addlegd=None, corrname=None, weightst_data=None):\n\n self.var = var\n self.diff_var = diff_var\n self.nq = nq\n self.bintpe = bintpe\n self.weightst_mc = weightst_mc\n self.EBEE = EBEE\n self.corrlabel = corrlabel\n self.addlabel = addlabel\n self.qts = [0.5, .25, .75, .1, .9]\n self.label = label\n self.addlegd = addlegd\n self.zoom = zoom\n self.name = 'profile_' + diff_var + '_' + var + '_' + bintpe + '_' + label\n self.title = var + ' ' + label.replace('_', ' ')\n self.corrname = r'\\textbf{\\textit{sim corr}}'\n self.colors = list(matplotlib.cm.Dark2.colors)\n\n self.plotB = plotBase(df_mc, var, weightst_mc, label, 'profile')\n with open('/t3home/threiten/python/plotting/texReplacement.yaml') as f:\n self.tex_replace_dict = yaml.load(f, Loader=yaml.FullLoader)\n f.close()\n\n self.xloc = 'right'\n self.yloc = 'top'\n\n if corrname != None:\n self.corrname = 'sim ' + corrname\n if self.zoom:\n self.name = self.name + '_zoom'\n\n self.mc = df_mc.loc[:, [self.var, self.diff_var, weightst_mc]]\n\n self.corr = False\n if corrlabel != None:\n self.mc[self.var+self.corrlabel] = df_mc[self.var+self.corrlabel]\n self.corr = True\n\n self.add = False\n if addlabel != None:\n self.mc[self.var+self.addlabel] = df_mc[self.var+self.addlabel]\n self.add = True\n\n if weightst_data == None:\n self.data = df_data.loc[:, [self.var, self.diff_var]]\n self.data['weight_dumm'] = np.ones(self.data.index.size)\n self.weightst_data = 'weight_dumm'\n else:\n self.data = df_data.loc[:, [\n self.var, self.diff_var, weightst_data]]\n self.weightst_data = weightst_data\n\n if self.bintpe == 'equ':\n self.data[self.diff_var+'_bin'], self.diff_bins = pd.qcut(\n self.data[self.diff_var], self.nq, labels=np.arange(self.nq), retbins=True)\n self.centers = 0.5*(self.diff_bins[1:]+self.diff_bins[:-1])\n self.mc[self.diff_var+'_bin'] = pd.cut(\n self.mc[self.diff_var], bins=self.diff_bins, labels=np.arange(self.nq))\n\n elif self.bintpe == 'lin':\n if self.diff_var == 'probeScEta':\n if self.EBEE == 'EB':\n self.diff_bins = np.linspace(\n *dvar_bins[self.diff_var][self.EBEE])\n elif self.EBEE == 'EE':\n self.diff_bins = np.hstack(\n (-np.flip(np.linspace(*dvar_bins[self.diff_var][self.EBEE]), 0), np.linspace(*dvar_bins[self.diff_var][self.EBEE])))\n else:\n self.diff_bins = np.linspace(*dvar_bins[self.diff_var])\n\n self.centers = 0.5*(self.diff_bins[1:]+self.diff_bins[:-1])\n\n self.data[self.diff_var+'_bin'] = pd.cut(\n self.data[self.diff_var], bins=self.diff_bins, labels=self.centers)\n self.mc[self.diff_var+'_bin'] = pd.cut(\n self.mc[self.diff_var], bins=self.diff_bins, labels=self.centers)\n if self.diff_var == 'probeScEta':\n self.centers = np.delete(self.centers, np.where((abs(self.centers) < dvar_bins[self.diff_var][self.EBEE][0]) | (\n abs(self.centers) > dvar_bins[diff_var][self.EBEE][1])))\n\n# self.nq = len(self.centers)\n\n else:\n print('Please choose bintype')\n sys.exit()\n\n self.datagb = self.data.groupby(self.diff_var+'_bin')\n self.mcgb = self.mc.groupby(self.diff_var+'_bin')\n\n # try:\n # self.binsq = np.linspace(*binsqpars[self.var])\n # except KeyError:\n # print('No predefined binparameters for ' + self.var + '. 
Please set yourself using set_binsq(min,max,#bins)')\n\n def get_quantiles(self):\n\n if self.bintpe == 'equ':\n self.data_quantiles = np.vstack([wquantile_unb(self.qts, self.datagb[self.var].get_group(\n i).values, weights=self.datagb[self.weightst_data].get_group(i).values) for i in np.arange(self.nq)])\n self.mc_quantiles = np.vstack([wquantile_unb(self.qts, self.mcgb[self.var].get_group(\n i).values, weights=self.mcgb[self.weightst_mc].get_group(i).values) for i in np.arange(self.nq)])\n if self.corr:\n self.mc_c_quantiles = np.vstack([wquantile_unb(self.qts, self.mcgb[self.var+self.corrlabel].get_group(\n i).values, weights=self.mcgb[self.weightst_mc].get_group(i).values) for i in np.arange(self.nq)])\n if self.add:\n self.mc_add_quantiles = np.vstack([wquantile_unb(self.qts, self.mcgb[self.var+self.addlabel].get_group(\n i).values, weights=self.mcgb[self.weightst_mc].get_group(i).values) for i in np.arange(self.nq)])\n elif self.bintpe == 'lin':\n self.data_quantiles = np.vstack([wquantile_unb(self.qts, self.datagb[self.var].get_group(\n i).values, self.datagb[self.weightst_data].get_group(i).values) for i in self.centers])\n self.mc_quantiles = np.vstack([wquantile_unb(self.qts, self.mcgb[self.var].get_group(\n i).values, self.mcgb[self.weightst_mc].get_group(i).values) for i in self.centers])\n if self.corr:\n self.mc_c_quantiles = np.vstack([wquantile_unb(self.qts, self.mcgb[self.var+self.corrlabel].get_group(\n i).values, weights=self.mcgb[self.weightst_mc].get_group(i).values) for i in self.centers])\n if self.add:\n self.mc_add_quantiles = np.vstack([wquantile_unb(self.qts, self.mcgb[self.var+self.addlabel].get_group(\n i).values, weights=self.mcgb[self.weightst_mc].get_group(i).values) for i in self.centers])\n\n def plot_profile(self, xunit='', yunit=''):\n\n rcP = {'text.usetex': True,\n 'font.family': 'sans-serif',\n 'font.sans-serif': ['Helvetica'],\n 'pdf.fonttype': 42,\n 'axes.labelsize': 20,\n 'font.size': 16,\n 'pgf.rcfonts': True,\n 'text.latex.preamble': r\"\\usepackage{bm, xspace, amsmath}\"}\n\n plt.rcParams.update(rcP)\n\n fig, axes = plt.subplots(1, figsize=(9, 7))\n\n hatchArr = [\n '////', r'//'.replace(' ', ''), r'///'.replace(' ', ''), '///']\n\n axes.plot(self.centers, self.data_quantiles[:, 0],\n '-', markersize=0, color=self.colors[0], label=r'\\textbf{\\textit{data}}', linewidth=4)\n axes.fill_between(\n self.centers, self.data_quantiles[:, 1], self.data_quantiles[:, 2], hatch=hatchArr[0], edgecolor=self.colors[0], alpha=0.7, facecolor='None', linewidth=0)\n axes.plot(\n self.centers, self.data_quantiles[:, 3], '--', linewidth=3, color=self.colors[0])\n axes.plot(\n self.centers, self.data_quantiles[:, 4], '--', linewidth=3, color=self.colors[0])\n\n axes.plot(self.centers, self.mc_quantiles[:, 0], '-',\n markersize=0, color=self.colors[1], label=r'\\textbf{\\textit{sim}}', linewidth=4)\n axes.fill_between(\n self.centers, self.mc_quantiles[:, 1], self.mc_quantiles[:, 2], hatch=hatchArr[1], edgecolor=self.colors[1], alpha=0.7, facecolor='None', linewidth=0)\n axes.plot(self.centers,\n self.mc_quantiles[:, 3], '--', linewidth=3, color=self.colors[1])\n axes.plot(self.centers,\n self.mc_quantiles[:, 4], '--', linewidth=3, color=self.colors[1])\n\n if self.add:\n axes.plot(self.centers, self.mc_add_quantiles[:, 0], '-',\n markersize=0, color=self.colors[3], label=self.addlegd, linewidth=4)\n axes.fill_between(\n self.centers, self.mc_add_quantiles[:, 1], self.mc_add_quantiles[:, 2], hatch=hatchArr[3], edgecolor=self.colors[3], alpha=0.7, facecolor='None', 
linewidth=0)\n axes.plot(\n self.centers, self.mc_add_quantiles[:, 3], '--', linewidth=3, color=self.colors[3])\n axes.plot(\n self.centers, self.mc_add_quantiles[:, 4], '--', linewidth=3, color=self.colors[3])\n\n if self.corr:\n axes.plot(self.centers, self.mc_c_quantiles[:, 0], '-',\n markersize=0, color=self.colors[2], label=self.corrname, linewidth=4)\n axes.fill_between(\n self.centers, self.mc_c_quantiles[:, 1], self.mc_c_quantiles[:, 2], hatch=hatchArr[2], edgecolor=self.colors[2], alpha=0.7, facecolor='None', linewidth=0)\n axes.plot(\n self.centers, self.mc_c_quantiles[:, 3], '--', linewidth=3, color=self.colors[2])\n axes.plot(\n self.centers, self.mc_c_quantiles[:, 4], '--', linewidth=3, color=self.colors[2])\n\n self.set_ylim()\n axes.set_ylim(0.85*self.ylim[0], self.ylim[1])\n if self.diff_var == 'probePt':\n axes.set_xlim(25, 60)\n # if xunit == None:\n # xunit = ''\n # if yunit == None:\n # yunit = ''\n # axes.xlabel(r'%s' % (self.diff_var.replace('_', '\\textunderscore ')))\n # axes.ylabel(r'%s' % (self.var.replace('_', '\\textunderscore ')))\n\n if self.var in self.tex_replace_dict:\n math, var, unit = self.plotB.parse_repl(\n self.tex_replace_dict[self.var])\n if unit == '':\n axes.set_ylabel(\n r'$\\boldsymbol{{{0}}}$'.format(var, unit), fontsize=20, loc=self.yloc)\n else:\n axes.set_ylabel(r'$\\boldsymbol{{{0}}}\\,\\,\\left[\\textnormal{{{1}}}\\right]$'.format(\n var, unit), fontsize=20, loc=self.yloc)\n else:\n axes.set_ylabel(r'\\textit{{{0}}}'.format(\n self.var.replace('_', '\\_')), fontsize=20, loc=self.yloc)\n\n if self.diff_var in self.tex_replace_dict:\n math, var, unit = self.plotB.parse_repl(\n self.tex_replace_dict[self.diff_var])\n if unit == '':\n axes.set_xlabel(\n r'$\\boldsymbol{{{0}}}$'.format(var, unit), fontsize=20, loc=self.xloc)\n else:\n axes.set_xlabel(r'$\\boldsymbol{{{0}}}\\,\\,\\left[\\textnormal{{{1}}}\\right]$'.format(\n var, unit), fontsize=20, loc=self.xloc)\n else:\n axes.set_xlabel(r'\\textit{{{0}}}'.format(\n self.var.replace('_', '\\_')), fontsize=20, loc=self.xloc)\n\n # plt.title(self.title,y=1.05)\n # fig.text(\n # 0.13, .91, r'\\textbf{CMS} \\textit{Work in Progress}', fontsize=11)\n axes.legend(loc='best', framealpha=0)\n LegHandles, LegLabels = axes.get_legend_handles_labels()\n for i, lab in enumerate(LegLabels):\n legPatch = matplotlib.patches.Patch(edgecolor=LegHandles[i].get_color(\n ), facecolor='None', hatch=hatchArr[i], linewidth=0, zorder=3)\n legLine = matplotlib.lines.Line2D([], [], color=LegHandles[i].get_color(\n ), marker='None', zorder=3, linewidth=2)\n # legLineT = matplotlib.lines.Line2D([], [], color=LegHandles[i].get_color(\n # ), marker='None', zorder=3, linewidth=1, linestyle='--')\n LegHandles[i] = (legPatch, legLine)\n axes.legend(labels=LegLabels, handles=LegHandles,\n fontsize=16, framealpha=0, loc='best')\n\n self.fig = fig\n\n def set_ylim(self):\n if self.zoom:\n self.ylim = [self.data_quantiles[:, 0].min()-.05*self.data_quantiles[:, 0].min(\n ), self.data_quantiles[:, 0].max()+.05*self.data_quantiles[:, 0].max()]\n else:\n self.ylim = [self.data_quantiles[:, 3].min()-.05*self.data_quantiles[:, 3].min(\n ), self.data_quantiles[:, 4].max()+.05*self.data_quantiles[:, 4].max()]\n\n def set_diffbins(self, mn, mx, nbins):\n self.diff_bins = np.linspace(mn, mx, nbins+1)\n\n def save(self, outDir):\n self.fig.savefig(outDir + '/' + self.name +\n '.png', bbox_inches='tight')\n self.fig.savefig(outDir + '/' + self.name +\n '.pdf', bbox_inches='tight')\n\n def set_binsq(self, low, high, nob):\n self.binsq = 
np.linspace(low, high, nob+1)\n" }, { "alpha_fraction": 0.5956978797912598, "alphanum_fraction": 0.6069420576095581, "avg_line_length": 39.90999984741211, "blob_id": "1921635dadb5f752db610dee5319508d507d5c5f", "content_id": "f3c75d8c814d8d47bcc881549c84af4ba42761cb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8182, "license_type": "no_license", "max_line_length": 138, "num_lines": 200, "path": "/run_plotter.py", "repo_name": "threiten/plotting", "src_encoding": "UTF-8", "text": "from joblib import delayed, Parallel\nfrom qRC.tmva.IdMVAComputer import helpComputeIdMva\nimport qRC.syst.qRC_systematics as syst\nimport plotting.plot_dmc_hist as pldmc\nimport plotting.parse_yaml as pyml\nimport root_pandas\nimport argparse\nimport itertools\nimport warnings\nimport sys\nimport pandas as pd\nimport numpy as np\nimport matplotlib\nmatplotlib.use('Agg')\nwarnings.filterwarnings('ignore')\n\n\ndef remove_duplicates(vars):\n mask = len(vars) * [True]\n for i in range(len(vars)):\n for j in range(i+1, len(vars)):\n if vars[i] == vars[j]:\n mask[j] = False\n\n return list(itertools.compress(vars, mask))\n\n\ndef chunked_loader(fpath, columns, **kwargs):\n fitt = pd.read_hdf(fpath, columns=columns,\n chunksize=10000, iterator=True, **kwargs)\n df = pd.concat(fitt, copy=False)\n\n return df\n\n\ndef make_unique_names(plt_list):\n\n mtl_list = len(plt_list) * [0]\n for i in range(len(plt_list)):\n mult = 0\n for j in range(i):\n if plt_list[i]['type'] == plt_list[j]['type'] and plt_list[i]['var'] == plt_list[j]['var']:\n mult += 1\n\n mtl_list[i] = mult\n\n for i in range(len(plt_list)):\n plt_list[i]['num'] = mtl_list[i]\n\n return plt_list\n\n\ndef make_vars(plot_dict, extra=[], extens=True):\n ret = []\n for dic in plot_dict:\n ret.append(dic['var'])\n if 'exts' in dic.keys() and extens:\n for ext in dic['exts']:\n ret.append(dic['var'] + ext)\n\n ret.extend(extra)\n\n return remove_duplicates(ret)\n\n\ndef check_vars(df, varrs):\n\n varmiss = len(varrs) * [False]\n for i, var in enumerate(varrs):\n if not var in df.columns:\n varmiss[i] = True\n\n return varmiss\n\n\ndef main(options):\n\n plot_dict = make_unique_names(pyml.yaml_parser(options.config)())\n varrs = make_vars(plot_dict, ['weight', 'weight_clf', 'probePassEleVeto', 'tagPt', 'tagScEta',\n 'probeScEnergy', 'probeSigmaRR']) # 'probeEtaWidth_Sc', 'probePhiWidth_Sc',\n varrs_data = make_vars(plot_dict, ['weight', 'probePassEleVeto', 'tagPt', 'tagScEta',\n 'probeScEnergy', 'probeSigmaRR'], extens=False) # , 'probeEtaWidth_Sc', 'probePhiWidth_Sc'\n\n # if 'probePhoIso03_uncorr' in varrs:\n # varrs.pop(varrs.index('probePhoIso03_uncorr'))\n\n if options.mc.split('.')[-1] == 'root':\n if options.mc_tree is None:\n raise NameError(\n 'mc_tree has to be in options if a *.root file is used as input')\n df_mc = root_pandas.read_root(\n options.mc, options.mc_tree, columns=varrs)\n else:\n df_mc = pd.read_hdf(options.mc, columns=varrs)\n\n if options.data.split('.')[-1] == 'root':\n if options.data_tree is None:\n raise NameError(\n 'data_tree has to be in options if a *.root file is used as input')\n df_data = root_pandas.read_root(\n options.data, options.data_tree, columns=varrs_data)\n else:\n df_data = pd.read_hdf(options.data, columns=varrs_data)\n\n if 'weight_clf' not in df_mc.columns and not options.no_reweight:\n if options.reweight_cut is not None:\n df_mc['weight_clf'] = syst.utils.clf_reweight(\n df_mc, df_data, n_jobs=10, cut=options.reweight_cut)\n else:\n warnings.warn(\n 
'Cut for reweighting is taken from 0th and 1st plot. Make sure this is the right one')\n if 'abs(probeScEta)<1.4442' in plot_dict[0]['cut'] and 'abs(probeScEta)>1.56' in plot_dict[1]['cut']:\n df_mc.loc[np.abs(df_mc['probeScEta']) < 1.4442, 'weight_clf'] = syst.utils.clf_reweight(\n df_mc.query('abs(probeScEta)<1.4442'), df_data, n_jobs=10, cut=plot_dict[0]['cut'])\n df_mc.loc[np.abs(df_mc['probeScEta']) > 1.56, 'weight_clf'] = syst.utils.clf_reweight(\n df_mc.query('abs(probeScEta)>1.56'), df_data, n_jobs=10, cut=plot_dict[1]['cut'])\n else:\n warnings.warn(\n 'Cut from 0th plot used to reweight whole dataset. Make sure this makes sense')\n df_mc['weight_clf'] = syst.utils.clf_reweight(\n df_mc, df_data, n_jobs=10, cut=plot_dict[0]['cut'])\n\n # if 'probePhiWidth' in varrs:\n # df_data['probePhiWidth'] = df_data['probePhiWidth_Sc']\n\n # if 'probeEtaWidth' in varrs:\n # df_data['probeEtaWidth'] = df_data['probeEtaWidth_Sc']\n\n # if 'probePhoIso03' in varrs:\n # df_mc['probePhoIso03_uncorr'] = df_mc['probePhoIso_uncorr']\n\n for var in ['probeChIso03', 'probeSigmaIeIe']:\n df_data[var+'_corr'] = df_data[var]\n\n if options.recomp_mva:\n stride = int(df_mc.index.size/10)\n print(stride)\n correctedVariables = ['probeR9', 'probeS4', 'probeCovarianceIeIp', 'probeEtaWidth',\n 'probePhiWidth', 'probeSigmaIeIe', 'probePhoIso', 'probeChIso03', 'probeChIso03worst']\n weightsEB = \"/work/threiten/QReg/ReReco17_data/camp_3_1_0/PhoIdMVAweights/HggPhoId_94X_barrel_BDT_v2.weights.xml\"\n weightsEE = \"/work/threiten/QReg/ReReco17_data/camp_3_1_0/PhoIdMVAweights/HggPhoId_94X_endcap_BDT_v2.weights.xml\"\n df_mc['probeScPreshowerEnergy'] = np.zeros(df_mc.index.size)\n df_mc['probePhoIdMVA_uncorr'] = np.concatenate(Parallel(n_jobs=10, verbose=20)(delayed(helpComputeIdMva)(\n weightsEB, weightsEE, correctedVariables, df_mc[ch:ch+stride], 'uncorr', False) for ch in range(0, df_mc.index.size, stride)))\n # df_mc['probePhoIdMVA_uncorr'] = helpComputeIdMva(weightsEB,weightsEE,correctedVariables,df_mc,'uncorr', False)\n\n varrs_miss = check_vars(df_mc, varrs)\n varrs_data_miss = check_vars(df_data, varrs_data)\n if any(varrs_miss + varrs_data_miss):\n print('Missing variables from mc df: ', list(\n itertools.compress(varrs, varrs_miss)))\n print('Missing variables from data df: ', list(\n itertools.compress(varrs_data, varrs_data_miss)))\n raise KeyError('Variables missing !')\n\n plots = []\n for i, dic in enumerate(plot_dict):\n print('Initializing plot {}/{}'.format(i+1, len(plot_dict)), end='\\r')\n plots.append(pldmc.plot_dmc_hist(\n df_mc, df_data=df_data, label=options.label, **dic))\n\n sys.stdout.flush()\n for i, plot in enumerate(plots):\n print('Drawing plot {}/{}'.format(i+1, len(plot_dict)), end='\\r')\n plot.draw()\n plot.save(options.outdir, save_dill=options.save_dill)\n matplotlib.pyplot.close(plot.fig)\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n requiredArgs = parser.add_argument_group()\n requiredArgs.add_argument(\n '-m', '--mc', action='store', type=str, required=True)\n requiredArgs.add_argument(\n '-d', '--data', action='store', type=str, required=True)\n requiredArgs.add_argument(\n '-c', '--config', action='store', type=str, required=True)\n requiredArgs.add_argument(\n '-o', '--outdir', action='store', type=str, required=True)\n optionalArgs = parser.add_argument_group()\n # optionalArgs.add_argument(\n # '-r', '--ratio', action='store_true', default=False)\n # optionalArgs.add_argument(\n # '-n', '--norm', action='store_true', default=False)\n 
optionalArgs.add_argument(\n '-p', '--save_dill', action='store_true', default=False)\n optionalArgs.add_argument('-w', '--no_reweight',\n action='store_true', default=False)\n # optionalArgs.add_argument(\n # '-k', '--cutstr', action='store_true', default=False)\n optionalArgs.add_argument('-M', '--recomp_mva',\n action='store_true', default=False)\n optionalArgs.add_argument('-l', '--label', action='store', type=str)\n optionalArgs.add_argument('-N', '--n_evts', action='store', type=int)\n optionalArgs.add_argument('-t', '--mc_tree', action='store', type=str)\n optionalArgs.add_argument('-s', '--data_tree', action='store', type=str)\n optionalArgs.add_argument('-u', '--reweight_cut', action='store', type=str)\n options = parser.parse_args()\n main(options)\n" }, { "alpha_fraction": 0.5421788692474365, "alphanum_fraction": 0.5426027774810791, "avg_line_length": 31.3150691986084, "blob_id": "fbabfc34fe31338edb4aa4115792c343b539164e", "content_id": "15505e696a84e6a1389eeb18009852f5071d69d8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2359, "license_type": "no_license", "max_line_length": 148, "num_lines": 73, "path": "/parse_yaml.py", "repo_name": "threiten/plotting", "src_encoding": "UTF-8", "text": "from collections import OrderedDict\nfrom itertools import compress\n\nimport itertools\nimport ast\nimport yaml\n\n\nclass yaml_parser():\n\n def __init__(self, fname):\n\n self.fname = fname\n\n org_dict = yaml.load(open(fname), Loader=yaml.FullLoader)\n self.final_dicts = org_dict['plots']\n\n if org_dict['globalOpts'] is not None:\n for glKey in org_dict['globalOpts']:\n for key in self.final_dicts.keys():\n if glKey not in self.final_dicts[key].keys():\n self.final_dicts[key][glKey] = org_dict['globalOpts'][glKey]\n\n if org_dict['cuts'] is not None:\n for key in self.final_dicts.keys():\n if 'cut' in self.final_dicts[key]:\n self.final_dicts[key]['cut'] = tuple(sti.strip() for sti in self.final_dicts[key]['cut'].split(\n ',')) + tuple(sti.strip() for sti in org_dict['cuts'].split(','))\n else:\n self.final_dicts[key]['cut'] = tuple(\n sti.strip() for sti in org_dict['cuts'].split(','))\n\n @staticmethod\n def parse_var(var):\n\n if isinstance(var, (int, float, list)):\n return [var]\n\n try:\n return list(ast.literal_eval(str(var)))\n except (ValueError, TypeError, SyntaxError):\n return [sti.strip() for sti in var.split(',')]\n\n @classmethod\n def get_combs(cls, dic):\n\n ord_dict = OrderedDict(dic)\n lis = [cls.parse_var(ord_dict[key]) for key in ord_dict.keys()]\n combs = list(itertools.product(*lis))\n dic_list = []\n for tpl in combs:\n litpl = list(tpl)\n dic = {}\n for key in ord_dict.keys():\n dic[key] = litpl[list(ord_dict.keys()).index(key)]\n dic_list.append(dic)\n\n return dic_list\n\n @staticmethod\n def clean_dict_list(dict_list):\n\n mask = [True] * len(dict_list)\n\n for i in range(len(dict_list)):\n for j in range(i+1, len(dict_list)):\n if dict_list[i] == dict_list[j]:\n mask[j] = False\n\n return list(compress(dict_list, mask))\n\n def __call__(self):\n return self.clean_dict_list(list(itertools.chain.from_iterable([self.get_combs(self.final_dicts[key]) for key in self.final_dicts.keys()])))\n" } ]
6
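A minimal numpy sketch of the weighted Pearson correlation that wcorr() in corr_plots.py above computes from weighted means and covariances — the name weighted_corr() is an illustrative choice here, not the repository's:

import numpy as np

def weighted_corr(a, b, w):
    # Weighted covariance over the weighted standard deviations, i.e.
    # Pearson's r with every sum replaced by a weighted average.
    ma = np.average(a, weights=w)
    mb = np.average(b, weights=w)
    cov_ab = np.average((a - ma) * (b - mb), weights=w)
    var_a = np.average((a - ma) ** 2, weights=w)
    var_b = np.average((b - mb) ** 2, weights=w)
    return cov_ab / np.sqrt(var_a * var_b)

rng = np.random.default_rng(0)
x = rng.normal(size=1000)
y = 0.8 * x + 0.2 * rng.normal(size=1000)
w = rng.uniform(0.5, 1.5, size=1000)
print(weighted_corr(x, y, w))  # close to the analytic r of ~0.97

With uniform weights this reduces exactly to the ordinary Pearson correlation, which is why one routine can serve both weighted simulation events and unweighted data.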
coastalcph/dialog-rl
https://github.com/coastalcph/dialog-rl
efeb7e1ea9feaa920b95cccb4c7c2f08b5bcd2d4
3fda2541710d083cc0233ffc680666ea87228119
994fa92dbff4f1241adba685f3cfc94349b28b9d
refs/heads/master
2022-12-10T12:02:55.841072
2019-09-17T07:29:23
2019-09-17T07:29:23
150,749,790
3
2
null
2018-09-28T14:07:04
2020-10-07T17:31:04
2022-09-23T22:29:33
Python
[ { "alpha_fraction": 0.515956699848175, "alphanum_fraction": 0.5218613743782043, "avg_line_length": 32.079071044921875, "blob_id": "b58305595280d164fdcbf04da79b2a59bf9c62bf", "content_id": "3b3b4654a878c262f84c2d0070bde032715b1458", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7113, "license_type": "no_license", "max_line_length": 112, "num_lines": 215, "path": "/util/eval.py", "repo_name": "coastalcph/dialog-rl", "src_encoding": "UTF-8", "text": "import numpy as np\nimport json\nfrom pprint import pprint\n\ndef zero_if_nan(n):\n return 0 if np.isnan(n) else n\n\ndef filter_labels(gold, eval_doms):\n filtered = {}\n for s, v in gold.items():\n if s.split('-')[0] in eval_doms:\n filtered[s] = v\n return filtered\n\ndef delex_labels(gold):\n allowed_slots = [\n \"attraction-area\",\n \"attraction-name\",\n \"attraction-type\",\n \"hotel-area\",\n \"hotel-day\",\n \"hotel-internet\",\n \"hotel-name\",\n \"hotel-parking\",\n \"hotel-people\",\n \"hotel-pricerange\",\n \"hotel-stars\",\n \"hotel-stay\",\n \"hotel-type\",\n \"restaurant-area\",\n \"restaurant-day\",\n \"restaurant-food\",\n \"restaurant-name\",\n \"restaurant-people\",\n \"restaurant-pricerange\",\n \"restaurant-time\",\n \"taxi-arriveBy\",\n \"taxi-leaveAt\",\n \"taxi-type\",\n \"train-arriveBy\",\n \"train-day\",\n \"train-leaveAt\",\n \"train-people\"]\n out = {}\n for s, v in gold.items():\n if s in allowed_slots:\n out[s] = v\n else:\n out[s] = \"<true>\"\n return out\n\ndef evaluate_preds(dialogs, preds, turn_predictions, eval_domains=None,\n write_out=None, delex=False):\n inform = []\n joint_goal = []\n belief_state = []\n dialog_reward = []\n final_binary_slot_precision = []\n final_binary_slot_recall = []\n binary_slot_precision = []\n binary_slot_recall = []\n turn_joint_goal = []\n f = None\n if write_out:\n f = open(write_out, \"w\")\n\n dialogs_out = []\n\n for di, d in enumerate(dialogs[:-1]):\n\n dialog_out = {\"turns\": []}\n for ti, turn in enumerate(d.turns):\n\n turn_out = {\"user_utt\": ' '.join(turn.user_utt),\n \"system_act\": turn.system_act,\n \"system_utt\": turn.system_utt}\n turn_gold = delex_labels(turn.labels_str) if delex else turn.labels_str\n\n gold_inform = turn_gold\n pred_inform = turn_predictions[di][ti]\n\n turn_out[\"gold\"] = turn_gold\n turn_out[\"pred\"] = pred_inform\n\n golds = filter_labels(turn_gold, eval_domains)\n\n for s, v in gold_inform.items():\n s_domain = s.split(\"-\")[0]\n #if eval_domains and s_domain not in eval_domains:\n if s_domain not in eval_domains:\n continue\n s_in_pred_inform = s in pred_inform\n binary_slot_recall.append(s_in_pred_inform)\n if s_in_pred_inform:\n inform.append(v == pred_inform[s])\n else:\n inform.append(False)\n\n ## Turn level inform accuracy\n golds = set([(s, v) for s, v in golds.items()])\n #if len(golds) == 0 and not ti == len(d.turns) - 1 and len(pred_inform) > 0:\n # print(\"Dialog\", di)\n # print(\"turn\", ti, \"out of\", len(d.turns))\n # pprint(turn_out)\n predictions = set([(s, v) for s, v in pred_inform.items()])\n\n turn_joint_goal.append(golds == predictions)\n\n #if di == 5:\n #if golds == predictions and len(golds) > 0:\n # print(turn_out)\n # print(\"{} Gold:\\t{}\".format(ti, golds))\n # print(\"{} Pred:\\t{}\".format(ti, predictions))\n # print(\"\")\n\n\n for s in pred_inform:\n s_domain = s.split(\"-\")[0]\n if s_domain not in eval_domains:\n continue\n binary_slot_precision.append(s in gold_inform)\n\n dialog_out[\"turns\"].append(turn_out)\n\n # evaluate final 
dialog-level performance\n gold_final_belief = {b['slots'][0]: b['slots'][1]\n for b in d.turns[-1].belief_state}\n\n gold_final_belief = delex_labels(gold_final_belief) if delex else gold_final_belief\n\n for s, v in gold_final_belief.items():\n s_domain = s.split(\"-\")[0]\n if s_domain not in eval_domains:\n continue\n #if s in preds[di]:\n # belief_state.append(v == preds[di][s])\n final_binary_slot_recall.append(s in preds[di])\n\n for s in preds[di]:\n s_domain = s.split(\"-\")[0]\n if s_domain not in eval_domains:\n continue\n final_binary_slot_precision.append(s in gold_final_belief)\n\n gold_final_belief = set([(s, v) for s, v in gold_final_belief.items()])\n dialog_preds = set([(s,v) for s, v in preds[di].items()])\n #print('Gold:', gold_final_belief)\n #print('Pred:', dialog_preds)\n #print()\n # How well did we predict the final belief state\n intersect_bs = dialog_preds.intersection(gold_final_belief)\n union_bs = dialog_preds.union(gold_final_belief)\n if len(gold_final_belief) == len(dialog_preds) == 0:\n dia_rew = 1\n else:\n dia_rew = len(intersect_bs) / len(union_bs)\n #(len(gold_final_belief) + len(dialog_preds - common_bs))\n\n #if dia_rew > 0.5:\n # print(dia_rew)\n # print(dialog_preds)\n # print(gold_final_belief)\n\n belief_state.append(gold_final_belief == dialog_preds)\n dialog_reward.append(dia_rew)\n\n dialog_out[\"gold_final_belief\"] = gold_final_belief\n dialog_out[\"pred_final_belief\"] = {s: v for s, v in preds[di].items()}\n dialogs_out.append(dialog_out)\n\n if f:\n # print(dialogs_out)\n try:\n json.dump(dialogs_out, f)\n except:\n pass\n f.close()\n\n final_R = np.mean(final_binary_slot_recall)\n final_P = np.mean(final_binary_slot_precision)\n final_binary_slot_F1 = 2 * final_R * final_P / (final_R + final_P)\n if np.isnan(final_binary_slot_F1):\n final_binary_slot_F1 = 0\n\n R = np.mean(binary_slot_recall)\n P = np.mean(binary_slot_precision)\n binary_slot_F1 = 2*R*P / (R+P)\n joint_goal = inform\n out_dict = {\n # 'turn_inform': np.mean(inform),\n # 'turn_request': np.mean(request),\n 'joint_goal': np.mean(joint_goal),\n 'turn_joint_goal': np.mean(turn_joint_goal),\n 'belief_state': np.mean(belief_state),\n 'dialog_reward': np.mean(dialog_reward),\n 'final_binary_slot_f1': final_binary_slot_F1,\n 'binary_slot_p': P,\n 'binary_slot_r': R,\n 'binary_slot_f1': binary_slot_F1\n }\n\n # fix NaNs\n return {k: zero_if_nan(v) for k, v in out_dict.items()}\n\n\ndef shape_reward(reward, scale_in=(0, 1), scale_out=(-2, 2), continuous=False):\n # Linear interpolation\n scaled = (reward - scale_in[0]) / (scale_in[1] - scale_in[0]) * (scale_out[1] - scale_out[0]) + scale_out[0]\n if not continuous:\n scaled = round(scaled)\n return scaled\n\ndef get_reward(e_scores):\n #return e_scores['joint_goal'] * w[0] + e_scores['belief_state'] * w[1]\n return e_scores['dialog_reward']\n\n" }, { "alpha_fraction": 0.5559055209159851, "alphanum_fraction": 0.5654668211936951, "avg_line_length": 45.54450225830078, "blob_id": "a95c9d2ea0fe95ab4bab17038008003ae49e3248", "content_id": "0d2b6d7bb701cf1792acc7d3df7ab3b44fe06b68", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8890, "license_type": "no_license", "max_line_length": 98, "num_lines": 191, "path": "/run.py", "repo_name": "coastalcph/dialog-rl", "src_encoding": "UTF-8", "text": "import os\nfrom util import util\nfrom util.featurize import *\nimport random\nfrom argparse import ArgumentDefaultsHelpFormatter, ArgumentParser\n\n\ndef main(args):\n print(args)\n device 
= util.get_device(args.gpu)\n domains = args.train_domains\n strict = args.train_strict\n print('Training on domains: ', domains)\n print('Single-domain dialogues only?', strict)\n #random.seed(args.seed)\n splits = [\"train\", \"dev\"]\n if args.test or args.pred:\n splits = [\"test\"]\n\n if args.elmo:\n data, s2v = util.load_dataset_elmo(splits=splits, base_path=args.path)\n else:\n data, ontology, vocab, embeddings = util.load_dataset(\n splits=splits, base_path=args.path)\n s2v = ontology.values\n\n data_filtered = {}\n data_featurized = {}\n\n # filter data for domains\n for split in splits:\n _data = [dg.to_dict() for dg in data[split].iter_dialogs()]\n max_dialogs = {\"train\": args.max_train_dialogs,\n \"dev\": args.max_dev_dialogs}.get(split, -1)\n data_filtered[split] = util.filter_dialogs(_data, domains, strict,\n max_dialogs,\n args.max_dialog_length)\n\n if split == \"train\":\n random.shuffle(data_filtered[split])\n\n # If not using ELMo featurized dataset, create slot-to-value featurization\n if not args.elmo:\n # Retrieve and clean slot-value pairs\n if args.delexicalize_labels:\n s2v = util.delexicalize(s2v)\n s2v = util.fix_s2v(s2v, data_filtered, splits=splits)\n\n # Featurize slots and values\n slot_featurizer = SlotFeaturizer(embeddings)\n value_featurizer = ValueFeaturizer(embeddings)\n s2v = util.featurize_s2v(s2v, slot_featurizer, value_featurizer)\n\n print(\"device : \", device)\n s2v = util.s2v_to_device(s2v, device)\n\n print(\"Featurizing...\")\n for split in splits:\n if args.elmo:\n data_featurized[split] = featurize_dialogs_elmo(\n data_filtered[split], s2v, device, args)\n else:\n data_featurized[split] = featurize_dialogs(\n data_filtered[split], s2v, device, args, w2v=embeddings)\n\n _key = list(data_featurized.keys())[0]\n DIM_INPUT = len(data_featurized[_key][0].turns[0].x_act)\n model = util.load_model(DIM_INPUT, DIM_INPUT, DIM_INPUT, DIM_INPUT,\n args.dhid, args.receptors, args)\n if args.resume:\n model.load_best_save(directory=args.resume)\n\n model = model.to(device)\n\n if args.test:\n results = model.run_eval(data_featurized[\"test\"], s2v,\n args.eval_domains, args.outfile)\n print(results)\n elif args.pred:\n raise NotImplementedError\n # model.run_predict(data_featurized[\"test\"], s2v, args)\n else:\n print(\"Training...\")\n if args.reinforce:\n if args.baseline:\n print('Loading baseline model')\n baseline = util.load_model(DIM_INPUT, DIM_INPUT, DIM_INPUT, DIM_INPUT,\n args.dhid, args.receptors, args)\n baseline.load_best_save(directory=args.resume)\n #baseline.trainable = False\n #baseline = copy.deepcopy(model)\n for param in baseline.parameters():\n param.requires_grad = False\n else:\n baseline = None\n model.run_train_reinforce(data_featurized[\"train\"],\n data_featurized[\"dev\"], s2v, args,\n baseline=baseline)\n else:\n model.run_train(data_featurized[\"train\"], data_featurized[\"dev\"],\n s2v, args)\n\n\ndef get_args():\n parser = ArgumentParser(formatter_class=ArgumentDefaultsHelpFormatter)\n parser.add_argument('--dexp', help='root experiment folder', default='exp')\n parser.add_argument('--model', help='which model to use',\n default='statenet')\n parser.add_argument('--epochs', help='max epochs to run for', default=50,\n type=int)\n parser.add_argument('--demb', help='word embedding size', default=400,\n type=int)\n parser.add_argument('--dhid', help='hidden state size', default=200,\n type=int)\n parser.add_argument('--batch_size', help='batch size', default=50, type=int)\n parser.add_argument('--lr', help='learning rate', 
default=1e-3, type=float)\n    parser.add_argument('--stop', help='slot to early stop on',\n                        default='joint_goal')\n    parser.add_argument('--resume', help='save directory to resume from')\n    parser.add_argument('-n', '--nick', help='nickname for model',\n                        default='default')\n    parser.add_argument('--reinforce', action='store_true',\n                        help='train with RL')\n    parser.add_argument('--baseline', help='use baseline for variance reduction when fine-tuning',\n                        action='store_true')\n    parser.add_argument('--patience', help='Patience for early stopping',\n                        default=20, type=int)\n    parser.add_argument('--gamma', help='RL discount', default=0.99, type=float)\n    parser.add_argument('--seed', default=42, help='random seed', type=int)\n    parser.add_argument('--test', action='store_true',\n                        help='run in evaluation only mode')\n    parser.add_argument('--pred', action='store_true',\n                        help='run in prediction only mode')\n    parser.add_argument('--gpu', type=int, help='which GPU to use')\n    parser.add_argument('--dropout', nargs='*', help='dropout rates',\n                        default=['emb=0.2', 'local=0.2', 'global=0.2'])\n    parser.add_argument('--train_domains', nargs='+',\n                        help='Domains on which to train. If finetune_domains is'\n                             ' also set, these will be used for pretraining.',\n                        default='all')\n    parser.add_argument('--finetune_domains', nargs='+',\n                        help='Domains on which to finetune')\n    parser.add_argument('--eval_domains', nargs='+',\n                        help='Domains on which to evaluate', default='all')\n    parser.add_argument('--train_strict', action='store_true',\n                        help='Restrict pretraining to dialogs with '\n                             'train_domains only')\n    parser.add_argument('--finetune_strict', action='store_true',\n                        help='Restrict finetuning to dialogs with '\n                             'finetune_domains only')\n    parser.add_argument('--eta', help='factor for loss for binary slot filling '\n                                      'prediction', default=0.5, type=float)\n    parser.add_argument('--path', help='path to data files',\n                        default='data/multiwoz/ann/')\n    parser.add_argument('--max_dialog_length', default=-1, type=int)\n    parser.add_argument('--max_train_dialogs', default=-1, type=int)\n    parser.add_argument('--max_dev_dialogs', default=-1, type=int)\n    parser.add_argument('--elmo', action='store_true',\n                        help=\"If set, use ELMo for encoding inputs\")\n    parser.add_argument('--pooled', action='store_true',\n                        help=\"If set, use max pooled ELMo embeddings\")\n    parser.add_argument('--elmo_weights',\n                        default='res/elmo/elmo_2x1024_128_2048cnn_1xhighway_'\n                                'weights.hdf5')\n    parser.add_argument('--elmo_options',\n                        default='res/elmo/elmo_2x1024_128_2048cnn_1xhighway_'\n                                'options.json')\n    parser.add_argument('--delexicalize_labels', action='store_true',\n                        help=\"If set, replaces labels with dummy for select \"\n                             \"slots\")\n    parser.add_argument('--encode_sys_utt', action='store_true',\n                        help=\"If set, uses system utterance too, instead of \"\n                             \"just system act representation\")\n    parser.add_argument('--receptors', default=3,\n                        help='number of receptors per n-gram', type=int)\n    parser.add_argument('--M', default=3, help='max n-gram size', type=int)\n    parser.add_argument('--outfile', help='output file for test')\n    parser.add_argument('--log_level', help='log level. 
default is info',\n                        default=\"info\")\n\n    _args = parser.parse_args()\n    _args.dout = os.path.join(_args.dexp, _args.model, _args.nick)\n    _args.dropout = {d.split('=')[0]: float(d.split('=')[1])\n                     for d in _args.dropout}\n    if not os.path.isdir(_args.dout):\n        os.makedirs(_args.dout)\n    return _args\n\n\nif __name__ == '__main__':\n    main(get_args())\n" }, { "alpha_fraction": 0.5569947957992554, "alphanum_fraction": 0.5569947957992554, "avg_line_length": 47.25, "blob_id": "c50bcde2bc4ce05d0344c41639626f21095cde53", "content_id": "414c5281f8c09740ae00b0a234f6969243173fec", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 386, "license_type": "no_license", "max_line_length": 77, "num_lines": 8, "path": "/util/data.py", "repo_name": "coastalcph/dialog-rl", "src_encoding": "UTF-8", "text": "from collections import namedtuple\n\nTurn = namedtuple(\"Turn\", [\"user_utt\", \"system_act\", \"system_utt\",\n                           \"x_utt\", \"x_act\", \"x_sys\", \"labels\", \"labels_str\",\n                           \"belief_state\"])\nDialog = namedtuple(\"Dialog\", [\"turns\"])\nSlot = namedtuple(\"Slot\", [\"domain\", \"embedding\", \"values\"])\nValue = namedtuple(\"Value\", [\"value\", \"embedding\", \"idx\"])\n" }, { "alpha_fraction": 0.6639871597290039, "alphanum_fraction": 0.6881029009819031, "avg_line_length": 41.86206817626953, "blob_id": "30516f1af1d3225cace71ada237706c5d55850be", "content_id": "be4d2a1d26b855296df6418e3e9b131eb36b7f29", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 1244, "license_type": "no_license", "max_line_length": 223, "num_lines": 29, "path": "/scripts/pretrain_domains.sh", "repo_name": "coastalcph/dialog-rl", "src_encoding": "UTF-8", "text": "#!/bin/bash\n# normal cpu stuff: allocate cpus, memory\n#SBATCH --ntasks=1 --cpus-per-task=10 --mem=6000M\n# we run on the gpu partition and we allocate 2 titanx gpus\n#SBATCH -p gpu --gres=gpu:1\n#We expect that our program should not run longer than 4 hours\n#Note that a program will be killed once it exceeds this time!\n#SBATCH --time=12:00:00\n\n#your script, in this case: write the hostname and the ids of the chosen gpus.\n\nDOMAINS=(taxi hotel restaurant attraction train);\n\nhostname\necho $CUDA_VISIBLE_DEVICES\n\nsource /home/grn762/projects/dialog-rl/dialog_env/bin/activate\n#source /home/vtx829/.env/bin/activate\n#python -m test.test_statenet --train_domains taxi --eval_domains taxi --epochs 100 --train_strict --gpu 1 --elmo --pooled -n test_slurm_taxi > test_slurm_taxi.log 2> test_slurm_taxi.err\n\nfor domain in ${DOMAINS[@]};\ndo\n\tpretrain=(${DOMAINS[@]//*$domain*}); # all but $domain\n    echo \"==============================\"\n\techo \"Target domain: \" $domain;\n\techo \" Pretrain:\" ${pretrain[@]};\n \tpython -m run --train_domains ${pretrain[@]} --train_strict --eval_domains ${pretrain[@]} --gpu $gpu -n pretrain-$domain --delexicalize_labels --epochs 200 --elmo --pooled > logs/pretrain-$domain-stdout.log & ((gpu++)) ;\n \tbreak\ndone\n\n" }, { "alpha_fraction": 0.47356322407722473, "alphanum_fraction": 0.6867815852165222, "avg_line_length": 15.11111068725586, "blob_id": "a78ecf78444d65a047575ee1b279e25d10f94580", "content_id": "6b6c9c082c5ad40196a5194a508a2463b98a4d11", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 1740, "license_type": "no_license", "max_line_length": 31, "num_lines": 108, "path": "/requirements.txt", "repo_name": "coastalcph/dialog-rl", "src_encoding": "UTF-8", "text": 
"alabaster==0.7.12\nallennlp==0.7.1\nasn1crypto==0.24.0\natomicwrites==1.2.1\nattrs==18.2.0\naws-xray-sdk==0.95\nawscli==1.16.60\nBabel==2.6.0\nboto==2.49.0\nboto3==1.9.50\nbotocore==1.12.50\ncertifi==2018.8.24\ncffi==1.11.2\nchardet==3.0.4\nClick==7.0\ncolorama==0.3.9\nconllu==0.11\ncookies==2.2.1\ncryptography==2.4.2\ncycler==0.10.0\ncymem==2.0.2\ncytoolz==0.9.0.1\ndill==0.2.8.2\ndocker==3.5.1\ndocker-pycreds==0.3.0\ndocutils==0.14\necdsa==0.13\neditdistance==0.5.2\nembeddings==0.0.4\nflaky==3.4.0\nFlask==0.12.4\nFlask-Cors==3.0.3\nftfy==5.5.0\nfuture==0.17.1\ngevent==1.3.6\ngreenlet==0.4.15\nh5py==2.8.0\nidna==2.6\nimagesize==1.1.0\nitsdangerous==1.1.0\nJinja2==2.10\njmespath==0.9.3\njsondiff==1.1.1\njsonnet==0.10.0\njsonpickle==1.0\nkiwisolver==1.0.1\nMarkupSafe==1.1.0\nmatplotlib==2.2.3\nmock==2.0.0\nmore-itertools==4.3.0\nmoto==1.3.4\nmsgpack==0.5.6\nmsgpack-numpy==0.4.3.2\nmurmurhash==1.0.1\nnltk==3.4\n# numpy==1.13.1\nnumpydoc==0.8.0\noverrides==1.9\npackaging==18.0\nparsimonious==0.8.0\npbr==5.1.1\n# pkg-resources==0.0.0\nplac==0.9.6\npluggy==0.8.0\npreshed==2.0.1\nprotobuf==3.4.0\npy==1.7.0\npyaml==18.11.0\npyasn1==0.4.4\npycparser==2.19\npycryptodome==3.7.0\nPygments==2.2.0\npyparsing==2.3.0\npytest==4.0.0\npython-dateutil==2.7.5\npython-jose==2.0.2\npytz==2017.3\nPyYAML==3.13\nregex==2018.1.10\nrequests==2.18.4\nresponses==0.10.4\nrsa==3.4.2\ns3transfer==0.1.13\nscikit-learn==0.20.0\nscipy==1.1.0\nsingledispatch==3.4.0.3\nsix==1.11.0\nsnowballstemmer==1.2.1\nspacy==2.0.16\nSphinx==1.8.2\nsphinxcontrib-websupport==1.1.0\nsqlparse==0.2.4\nstanza==0.3\ntensorboardX==1.2\nthinc==6.12.0\ntoolz==0.9.0\n# torch==0.4.1\ntorchsummary==1.5.1\ntqdm==4.19.1.post1\nujson==1.35\nUnidecode==1.0.23\nurllib3==1.22\nvocab==0.0.3\nwcwidth==0.1.7\nwebsocket-client==0.54.0\nWerkzeug==0.14.1\nwrapt==1.10.11\nxmltodict==0.11.0\n" }, { "alpha_fraction": 0.5102792978286743, "alphanum_fraction": 0.5147401094436646, "avg_line_length": 31.29227638244629, "blob_id": "11dc617327631f70332b5c105cdc1131b4577779", "content_id": "c4e6c035559e01f31c5dfaba8980438f59d85623", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 15468, "license_type": "no_license", "max_line_length": 102, "num_lines": 479, "path": "/util/featurize.py", "repo_name": "coastalcph/dialog-rl", "src_encoding": "UTF-8", "text": "import numpy as np\nfrom collections import OrderedDict\nimport torch\nfrom util.data import *\nfrom tqdm import tqdm\n\n\ndef get_value_index(_values, _val):\n for _idx, candidate in enumerate(_values):\n if candidate.value == _val:\n return _idx\n return -1\n\n\ndef featurize_dialogs_elmo(_data, s2v, device, args, pooled=True):\n featurized_dialogs = []\n\n for dg in tqdm(_data):\n featurized_turns = []\n\n all_user_utts = []\n all_system_acts = []\n all_system_utts = []\n all_lbls = []\n all_ys = []\n all_bsts = []\n\n for t in dg['turns']:\n if args.pooled:\n utt = t['usr_trans_elmo_pool']\n sys = t['sys_trans_elmo_pool']\n act = t['sys_acts_elmo_pool']\n else:\n utt = t['usr_trans_elmo']\n sys = t['sys_trans_elmo']\n act = t['sys_acts_elmo']\n bst = t['belief_state']\n lbls = {}\n for s, v in t['turn_label']:\n v = v.lower()\n if v in [_v.value for _v in s2v[s].values]:\n lbls[s] = v\n else:\n lbls[s] = \"<true>\"\n ys = {}\n for slot, val in lbls.items():\n values = s2v[slot].values\n ys[slot] = torch.zeros(len(values))\n idx = get_value_index(values, val)\n ys[slot][idx] = 1\n\n all_user_utts.append(utt)\n all_system_acts.append(act)\n 
all_system_utts.append(sys)\n all_ys.append(ys)\n all_lbls.append(lbls)\n all_bsts.append(bst)\n\n all_x_utt = all_user_utts\n all_x_act = all_system_acts\n all_x_sys = all_system_utts\n\n for i in range(len(dg['turns'])):\n\n # Encode user and action representations\n if args.pooled:\n x_utt = all_x_utt[i].to(device)\n x_sys = all_x_sys[i].to(device)\n x_act = all_x_act[i].to(device)\n else:\n x_utt = [t.to(device) for t in\n all_x_utt[i]] # one vector per n\n x_sys = [t.to(device) for t in\n all_x_sys[i]] # one vector per n\n x_act = [t.to(device) for t in\n all_x_act[i]]\n\n featurized_turns.append(Turn(\n all_user_utts[i], all_system_acts[i], all_system_utts[i],\n x_utt, x_act, x_sys,\n all_ys[i], all_lbls[i], all_bsts[i]))\n\n featurized_dialogs.append(Dialog(featurized_turns))\n\n return featurized_dialogs\n\n\ndef featurize_dialogs(_data, s2v, device, args, w2v=None):\n featurized_dialogs = []\n\n utt_ftz = UserInputNgramFeaturizer(w2v, n=args.M)\n sys_ftz = UserInputNgramFeaturizer(w2v, n=args.M)\n act_ftz = ActionFeaturizer(w2v)\n\n for dg in tqdm(_data):\n featurized_turns = []\n\n all_user_utts = []\n all_system_acts = []\n all_system_utts = []\n all_lbls = []\n all_ys = []\n all_bsts = []\n\n for t in dg['turns']:\n utt = t['transcript']\n sys = t['system_transcript']\n act = t['system_acts']\n bst = t['belief_state']\n lbls = {}\n for s, v in t['turn_label']:\n v = v.lower()\n if v in [_v.value for _v in s2v[s].values]:\n lbls[s] = v\n else:\n lbls[s] = \"<true>\"\n\n ys = {}\n for slot, val in lbls.items():\n values = s2v[slot].values\n ys[slot] = torch.zeros(len(values))\n idx = get_value_index(values, val)\n ys[slot][idx] = 1\n\n all_user_utts.append(utt)\n all_system_acts.append(act)\n all_system_utts.append(sys)\n all_ys.append(ys)\n all_lbls.append(lbls)\n all_bsts.append(bst)\n\n all_x_utt = utt_ftz.featurize_batch(all_user_utts)\n all_x_act = act_ftz.featurize_batch(all_system_acts)\n all_x_sys = sys_ftz.featurize_batch(all_system_utts)\n\n for i in range(len(dg['turns'])):\n\n # Encode user and action representations\n if args.elmo:\n x_utt = all_x_utt[i].to(device)\n x_sys = all_x_sys[i].to(device)\n else:\n x_utt = [t.to(device) for t in\n all_x_utt[i]] # one vector per n\n x_sys = [t.to(device) for t in\n all_x_sys[i]] # one vector per n\n x_act = all_x_act[i].to(device)\n\n featurized_turns.append(Turn(\n all_user_utts[i], all_system_acts[i], all_system_utts[i],\n x_utt, x_act, x_sys,\n all_ys[i], all_lbls[i], all_bsts[i]))\n\n featurized_dialogs.append(Dialog(featurized_turns))\n\n return featurized_dialogs\n\n\ndef make_n_gram_bow(sequence, n, mode='sum', vectors=True):\n \"\"\"\n Aggregates over sliding window of `n' concatenations of word vectors.\n Vectors can either be fixed-size embeddings or one-hot vectors. In the\n latter case, it's a straight BOW model, no concatenation of n-grams for\n now (should this be implemented? 
seems like this would grow the vectors\n really large as they get length V**n).\n :param sequence:\n :param n:\n :param mode:\n :param vectors:\n :return:\n \"\"\"\n bow = []\n assert len(sequence) > n, \"Sequence too short (must be at least n+1 long)\"\n if vectors:\n for i in range(len(sequence) - n + 1):\n bow.append(np.concatenate(sequence[i:i+n]))\n else:\n # TODO actually do n-gram BOW (every n-gram gets its index...)\n bow = sequence # just aggregate over one-hot vectors\n if mode == 'sum':\n return np.sum(bow, 0)\n elif mode == 'avg':\n return np.mean(bow, 0)\n elif mode == 'max':\n return np.max(bow, 0)\n else:\n return NotImplementedError(\"Mode for aggregating n-grams must be one \"\n \"of 'sum', 'avg' or 'max'.\")\n\n\nclass LabelMapper:\n \"\"\"\n Encodes true values as one-hot vectors\n \"\"\"\n def __init__(self):\n super().__init__()\n self.label_values = set()\n self.label2id = OrderedDict()\n self.id2label = OrderedDict()\n\n def fit(self, labels, warm_start=False):\n if not warm_start:\n self.label_values = set()\n for label in labels:\n self.label_values.add(label)\n _values = sorted(list(self.label_values))\n for i, v in enumerate(_values):\n self.label2id[v] = i\n self.id2label[i] = v\n\n def transform(self, labels, onehot=True):\n \"\"\"\n\n :param labels:\n :param onehot: If true, encode labels to onehot. Else, encode to\n categorical representations (integers)\n :return:\n \"\"\"\n out = []\n for l in labels:\n if onehot:\n encoding = np.zeros(len(self.label_values))\n encoding[self.label2id[l]] = 1\n else:\n encoding = self.label2id[l]\n out.append(encoding)\n return out\n\n def fit_transform(self, labels, onehot=True):\n self.fit(labels)\n return self.transform(labels, onehot)\n\n\nclass Featurizer:\n\n def __init__(self):\n pass\n\n def fit_transform(self, inputs):\n pass\n\n def fit(self, inputs):\n pass\n\n\nclass ElmoFeaturizer(Featurizer):\n\n def __init__(self, elmo, mode):\n super().__init__()\n self.elmo = elmo\n self.mode = mode\n self.map = self.system_act_mapping()\n\n def featurize_turn(self, turn):\n if self.mode == \"utterance\":\n turn = [\"<bos>\"] + turn + [\"<eos>\"]\n elif self.mode == \"act\":\n turn = [item for sublist in turn for item in sublist]\n turn = self.clean_act(turn)\n elif self.mode in [\"slot\", \"value\"]:\n turn = [turn]\n #turn = self.clean_act(turn)\n # get elmo embeddings\n if not turn:\n turn = [[\"<NIL>\"]]\n e_toks = self.elmo.batch_to_embeddings(turn)[0][0]\n\n # Sequence of elmo embeddings with all 3 layers concatenated for each token\n #tok_embs = torch.cat((e_toks[0, :, :],\n # e_toks[1, :, :],\n # e_toks[2, :, :]),\n # dim=1)\n\n # Average 3 ELMo layers\n tok_embs = torch.mean(e_toks, dim=0)\n\n # max over tokens & flatten\n pooled = torch.max(tok_embs, dim=0)[0].view(-1)\n #pooled = torch.max(e_toks, dim=1)[0].view(-1)\n\n return tok_embs, pooled\n\n def featurize_batch(self, batch):\n if self.mode == \"utterance\":\n batch = [[\"<bos>\"] + turn + [\"<eos>\"] for turn in batch]\n elif self.mode == \"act\":\n batch = [self.clean_act([item for sublist in turn for item in sublist]) for turn in batch]\n #elif self.mode in [\"slot\", \"value\"]:\n # batch = [self.clean_act(turn) for turn in batch]\n\n e_toks = self.elmo.batch_to_embeddings(batch)[0]\n\n # Sequence of elmo embeddings with all 3 layers concatenated for each token\n #tok_embs = torch.cat((e_toks[:, 0, :, :],\n # e_toks[:, 1, :, :],\n # e_toks[:, 2, :, :]),\n # dim=2)\n\n # Average 3 ELMo layers\n tok_embs = torch.mean(e_toks, dim=1)\n\n # max over tokens & 
flatten\n pooled = torch.max(tok_embs, dim=1)[0].view(len(batch), -1)\n #pooled = torch.max(e_toks, dim=2)[0].view(len(batch), -1)\n\n return tok_embs, pooled\n\n def clean_act(self, turn):\n #turn = [self.map.get(item, item) for item in turn]\n turn = [\"<bos>\"] + turn + [\"<eos>\"]\n return turn\n\n def system_act_mapping(self):\n mapping = {'Dest': 'destination',\n 'Ref': 'reference',\n '=': 'is',\n 'Addr': 'address',\n '?': 'unknown',\n 'a': 'a'}\n return mapping\n\n\nclass UserInputNgramFeaturizer(Featurizer):\n\n def __init__(self, embeddings, n=2):\n \"\"\"\n\n :param embeddings: Embeddings dictionary, mapping words to fix length\n vectors\n :param n: the order for n-gram concatenations (see StateNet paper)\n \"\"\"\n super().__init__()\n self.embeddings = embeddings\n self.n = n\n\n def featurize_word(self, word):\n return np.array(self.embeddings.get(word.lower()),\n dtype=np.float)\n\n def featurize_turn(self, turn):\n if type(turn) == str:\n turn = turn.split()\n turn = ['<sos>'] + turn + ['<eos>']\n # if not turn:\n # return torch.zeros(len(self.embeddings['i']) * self.n)\n seq = [self.featurize_word(w) for w in turn if w in self.embeddings]\n if len(seq) < (self.n+1):\n seq += [self.featurize_word(\"<eos>\") for _ in range(self.n+1 -\n len(seq))]\n utt_reps = []\n for k in range(self.n):\n kgram = make_n_gram_bow(seq, k + 1, mode='sum')\n utt_reps.append(torch.Tensor(kgram))\n #print(len(turn), turn)\n #print(len(utt_reps), [k.shape for k in utt_reps])\n #return utt_reps[self.n - 1]\n return utt_reps\n\n def featurize_dialog(self, dialog):\n return [self.featurize_turn(t) for t in dialog.to_dict()['turns']]\n\n def featurize_batch(self, batch):\n return [self.featurize_turn(t) for t in batch]\n\n\nclass UserInputFeaturizer(Featurizer):\n\n def __init__(self, embeddings, n=2):\n \"\"\"\n\n :param embeddings: Embeddings dictionary, mapping words to fix length\n vectors\n :param n: the order for n-gram concatenations (see StateNet paper)\n \"\"\"\n super().__init__()\n self.embeddings = embeddings\n self.n = n\n\n def featurize_word(self, word):\n return np.array(self.embeddings.get(word.lower()),\n dtype=np.float)\n\n def featurize_turn(self, turn):\n if type(turn) == str:\n turn = turn.split()\n turn = ['<sos>'] + turn + ['<eos>']\n # if not turn:\n # return torch.zeros(len(self.embeddings['i']) * self.n)\n seq = [self.featurize_word(w) for w in turn if w in self.embeddings]\n if len(seq) < (self.n+1):\n seq += [self.featurize_word(\"<eos>\") for _ in range(self.n+1 -\n len(seq))]\n ngrams = make_n_gram_bow(seq, self.n, mode='sum')\n # print(seq, ngrams, ngrams.shape)\n # print(turn )\n return torch.Tensor(ngrams)\n\n def featurize_dialog(self, dialog):\n return [self.featurize_turn(t) for t in dialog.to_dict()['turns']]\n\n def featurize_batch(self, batch):\n return [self.featurize_turn(t) for t in batch]\n\n\nclass ActionFeaturizer(Featurizer):\n\n def __init__(self, embeddings, mode='max'):\n \"\"\"\n\n :param embeddings: Embeddings dictionary, mapping words to fix length\n vectors\n \"\"\"\n super().__init__()\n self.embeddings = embeddings\n self.mode = mode\n\n def featurize_act(self, act):\n if self.mode == 'max':\n # maxpooling over words in act (['inform', 'Price', '=', 'cheap'])\n vec = torch.Tensor(np.max(\n [np.array(self.embeddings.get(w.lower()), dtype=np.float)\n for w in act if w.lower() in self.embeddings], axis=0))\n else:\n raise NotImplementedError('only max pooling implemented so far')\n return vec\n\n def featurize_turn(self, turn):\n if turn:\n # 
print(turn)\n act_f = torch.stack([self.featurize_act(a) for a in turn])\n # print(act_f)\n return torch.max(act_f, 0)[0]\n # return np.array(act_f).sum()\n\n else:\n return torch.zeros(len(self.embeddings['i']))\n\n def featurize_batch(self, batch):\n return [self.featurize_turn(t) for t in batch]\n\n\nclass SlotFeaturizer(Featurizer):\n\n def __init__(self, embeddings):\n super().__init__()\n self.embeddings = embeddings\n oov_len = len(embeddings[list(embeddings.keys())[0]])\n self.oov = np.zeros(oov_len)\n\n def featurize_turn(self, slot):\n vecs = np.array([self.embeddings.get(w.lower(), self.oov)\n for w in slot])\n if not len(vecs):\n vecs = [self.oov]\n slot_emb = np.max(vecs, 0) # max across dimensions\n return torch.Tensor(slot_emb)\n\n def featurize_batch(self, batch):\n return [self.featurize_turn(t) for t in batch]\n\n\nclass ValueFeaturizer(Featurizer):\n\n def __init__(self, embeddings):\n super().__init__()\n self.embeddings = embeddings\n oov_len = len(embeddings[list(embeddings.keys())[0]])\n self.oov = np.zeros(oov_len)\n\n def featurize_turn(self, val):\n vecs = np.array([self.embeddings.get(w.lower(), self.oov)\n for w in val])\n if not len(vecs):\n vecs = [self.oov]\n val_emb = np.max(vecs, 0) # max across dimensions\n return torch.Tensor(val_emb)\n\n def featurize_batch(self, batch):\n return [self.featurize_turn(t) for t in batch]\n" }, { "alpha_fraction": 0.5166919827461243, "alphanum_fraction": 0.5214792490005493, "avg_line_length": 40.252872467041016, "blob_id": "099730bc46aa031e5cee801dfc7660da9d2e93a1", "content_id": "a0d20797923f8475844ce310ed51d19b91614a62", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 39480, "license_type": "no_license", "max_line_length": 121, "num_lines": 957, "path": "/models/statenet.py", "repo_name": "coastalcph/dialog-rl", "src_encoding": "UTF-8", "text": "import os\nimport re\nimport logging\nimport torch\nfrom torch import nn\nfrom torch import optim\nfrom torch.nn import functional as F\nfrom torch.nn.modules.normalization import LayerNorm\nfrom torch.distributions import Categorical\nimport numpy as np\nfrom tqdm import tqdm\nfrom collections import defaultdict\nfrom pprint import pformat\nfrom util.eval import evaluate_preds, get_reward\nfrom util import util\n\n\n# TODO refactor such that encoder classes are declared within StateNet, allows\n# for better modularization and sharing of instances/variables such as\n# embeddings\n\n\neps = np.finfo(np.float32).eps.item()\n\n\nclass MultiScaleReceptors(nn.Module):\n \"\"\"\n\n \"\"\"\n def __init__(self, in_dim, out_dim, receptors):\n super().__init__()\n # Number of linear networks\n self.receptors = receptors\n # Init linear networks\n for i in range(self.receptors):\n setattr(self, 'linear_out_{}'.format(i), nn.Linear(in_dim, out_dim))\n\n def forward(self, utt_ngram_rep):\n out = []\n # Get output for every linear network\n for i in range(self.receptors):\n lin_layer = getattr(self, 'linear_out_{}'.format(i))\n out.append(lin_layer(utt_ngram_rep))\n # Return concatenated ngram representation: stack along receptors,\n # transpose such that output shape is [batch_size, receptors, out_dim]\n return torch.stack(out).transpose(0, 1)\n\n\nclass MultiScaleReceptorsModule(nn.Module):\n \"\"\"\n\n \"\"\"\n def __init__(self, in_dim, out_dim, receptors, n):\n super().__init__()\n self.receptors = receptors\n self.n = n\n self.layer_norm = LayerNorm(receptors * out_dim)\n self.linear_out = nn.Linear(receptors * out_dim, 
out_dim)\n\n # Initialize the c linear nets for each k-gram utt rep for 1 >= k >= n\n for i in range(n):\n msr_in_dim = in_dim * (i + 1)\n setattr(self, 'linear_out_r{}'.format(i),\n MultiScaleReceptors(msr_in_dim, out_dim, self.receptors))\n\n def forward(self, user_ngram_utterances):\n \"\"\"\n :param user_ngram_utterances:\n :return:\n \"\"\"\n rets = []\n batch_size = len(user_ngram_utterances[0])\n # For each k-gram utterance representation, get output from MSR networks\n for i in range(self.n):\n msr = getattr(self, 'linear_out_r{}'.format(i))\n msr_out = msr(user_ngram_utterances[i])\n rets.append(msr_out)\n\n rets = torch.stack(rets).transpose(0, 1)\n # sum along n-grams and flatten receptors and hidden\n out = torch.sum(rets, 1).view(batch_size, -1)\n out = self.layer_norm(out)\n out = F.relu(out)\n out = self.linear_out(out)\n return out\n\n\nclass UtteranceEncoder(nn.Module):\n \"\"\"\n\n \"\"\"\n\n def __init__(self, in_dim, out_dim):\n super().__init__()\n self.layer_norm = LayerNorm(in_dim)\n self.linear_out = nn.Linear(in_dim, out_dim)\n\n def forward(self, user_utterance):\n \"\"\"\n\n :param user_utterance:\n :return:\n \"\"\"\n user_utterance = torch.stack(user_utterance).transpose(0, 1)\n try:\n out = self.layer_norm(user_utterance)\n except RuntimeError:\n print(user_utterance, user_utterance.shape)\n raise RuntimeError\n\n out = F.relu(out)\n out = self.linear_out(out)\n return out\n\n\nclass ActionEncoder(nn.Module):\n \"\"\"\n\n \"\"\"\n\n def __init__(self, in_dim, out_dim):\n super().__init__()\n self.linear = nn.Linear(in_dim, out_dim)\n\n def forward(self, action):\n \"\"\"\n\n :param action:\n :return:\n \"\"\"\n return F.relu(self.linear(action))\n\n\nclass SlotEncoder(nn.Module):\n \"\"\"\n\n \"\"\"\n\n def __init__(self, in_dim, out_dim, device):\n \"\"\"\n\n :param in_dim:\n :param out_dim:\n \"\"\"\n super().__init__()\n self.linear = nn.Linear(in_dim, out_dim)\n self.device = device\n\n def forward(self, slot_embedding):\n \"\"\"\n\n :param slot_embedding: Vector representation of slot\n :return:\n \"\"\"\n return F.relu(self.linear(slot_embedding)).view(-1).to(self.device)\n\n\nclass PredictionEncoder(nn.Module):\n \"\"\"\n\n \"\"\"\n\n def __init__(self, in_dim, hidden_dim, out_dim):\n \"\"\"\n\n \"\"\"\n super().__init__()\n self.rnn = nn.GRU(in_dim, hidden_dim)\n self.linear = nn.Linear(hidden_dim, out_dim)\n\n def forward(self, inputs, hidden):\n \"\"\"\n Runs the RNN to compute outputs based on history from previous\n slots and turns. 
We maintain the hidden state across calls to this\n function.\n :param inputs: shape (batch_size, embeddings)\n :param hidden:\n :return: shape (batch_size, self.out_dim)\n \"\"\"\n batch_size, embedding_length = inputs.shape\n # reshape input to length 1 sequence (RNN expects input shape\n # [sequence_length, batch_size, embedding_length])\n inputs = inputs.view(1, batch_size, embedding_length)\n # compute output and new hidden state\n # print(hidden.shape, inputs.shape)\n rnn_out, hidden = self.rnn(inputs, hidden)\n # reshape to [batch_size,\n rnn_out = rnn_out.view(batch_size, -1)\n o = F.relu(self.linear(rnn_out))\n # print(\"prediction vector:\", o.shape)\n return o, hidden\n\n\nclass ValueEncoder(nn.Module):\n \"\"\"\n\n \"\"\"\n def __init__(self, in_dim, out_dim, device):\n \"\"\"\n\n :param in_dim:\n :param out_dim:\n \"\"\"\n super().__init__()\n self.linear = nn.Linear(in_dim, out_dim)\n self.device = device\n\n def forward(self, value_embedding):\n \"\"\"\n\n :param value_embedding:\n :return:\n \"\"\"\n return F.relu(self.linear(value_embedding)).to(self.device)\n\n\nclass StateNet(nn.Module):\n \"\"\"\n Implementation based on Ren et al. (2018): Towards Universal Dialogue\n State Tracking. EMNLP 2018. http://aclweb.org/anthology/D18-1299.\n\n The paper remains unclear regarding a number of points, for which we\n make decisions based on our intuition. These are, for example:\n\n (1) How are predictions across turns aggregated on the dialogue level?\n Is the probability for a slot-value pair maxed across turns?\n - We assume yes.\n (2) The paper says that parameters are optimized based on cross-entropy\n between slot-value predictions and gold labels. How does this integrate\n the LSTM that is located outside the turn loop?\n - Not really sure how to handle this yet...\n (3) Is the LSTM updated after every turn AND every slot representation\n computation?\n - We assume yes.\n\n \"\"\"\n\n def __init__(self, input_user_dim, input_action_dim, input_slot_dim,\n input_value_dim, hidden_dim, receptors,\n args):\n \"\"\"\n\n :param input_user_dim: dimensionality of user input embeddings\n :param input_action_dim: dimensionality of action embeddings\n :param hidden_dim:\n :param receptors:\n \"\"\"\n super().__init__()\n\n self.hidden_dim = hidden_dim\n self.args = args\n self.device = self.get_device(args.gpu)\n\n if args.encode_sys_utt:\n slot_hidden_dim = 3 * hidden_dim\n else:\n slot_hidden_dim = 2 * hidden_dim\n\n if not args.elmo:\n input_user_dim = input_user_dim * args.M\n u_in_dim = input_user_dim\n a_in_dim = input_action_dim\n # s_in_dim = input_user_dim\n n = int(u_in_dim / a_in_dim)\n if args.elmo:\n self.utt_enc = UtteranceEncoder(u_in_dim, hidden_dim)\n else:\n self.utt_enc = MultiScaleReceptorsModule(a_in_dim, hidden_dim,\n receptors, n)\n self.action_encoder = ActionEncoder(a_in_dim, hidden_dim)\n self.slot_encoder = SlotEncoder(input_slot_dim, hidden_dim,\n self.device)\n self.value_encoder = ValueEncoder(input_value_dim, hidden_dim,\n self.device)\n # self.prediction_encoder = PredictionEncoder(slot_hidden_dim,\n # hidden_dim, hidden_dim)\n self.turn_history_rnn = PredictionEncoder(slot_hidden_dim, hidden_dim,\n hidden_dim)\n self.slot_fill_indicator = nn.Linear(hidden_dim, 1)\n self.optimizer = None\n self.epochs_trained = 0\n self.logger = self.get_train_logger()\n self.logger.setLevel(args.log_level.upper())\n self.logger.info(args)\n self.set_optimizer()\n\n def set_epochs_trained(self, e):\n self.epochs_trained = e\n\n def set_optimizer(self):\n self.optimizer 
= optim.Adam(self.parameters(), lr=self.args.lr)\n # self.optimizer = optim.SGD(self.parameters(), lr=self.args.lr)\n\n def get_train_logger(self):\n logger = logging.getLogger(\n 'train-{}'.format(self.__class__.__name__))\n formatter = logging.Formatter('%(asctime)s [%(threadName)-12.12s] '\n '[%(levelname)-5.5s] %(message)s')\n file_handler = logging.FileHandler(\n os.path.join(self.args.dout, 'train.log'))\n file_handler.setFormatter(formatter)\n logger.addHandler(file_handler)\n return logger\n\n def forward_turn(self, batch, slots2values, hidden):\n \"\"\"\n\n # :param x_user: shape (batch_size, user_embeddings_dim)\n # :param x_action: shape (batch_size, action_embeddings_dim)\n # :param x_sys: shape (batch_size, sys_embeddings_dim)\n :param batch:\n :param hidden: shape (batch_size, 1, hidden_dim)\n :param slots2values: dict mapping slots to values to be tested\n # :param labels: dict mapping slots to one-hot ground truth value\n representations\n :return: tuple (loss, probs, hidden), with `loss' being the overall\n loss across slots, `probs' a dict mapping slots to probability\n distributions over values, `hidden' the new hidden state\n \"\"\"\n batch_size = len(batch)\n probs = defaultdict(list)\n binary_filling_probs = {}\n\n # user input encoding [batch_size, hidden_dim]\n\n all_utt = [torch.stack([turn.x_utt[k] for turn in batch])\n for k in range(len(batch[0].x_utt))]\n fu = self.utt_enc(all_utt)\n # system act input encoding [batch_size, hidden_dim]\n all_act = torch.stack([turn.x_act for turn in batch])\n fa = self.action_encoder(all_act)\n\n if self.args.encode_sys_utt:\n fy = self.utt_enc(torch.Tensor([turn.x_sys for turn in batch]))\n f_turn_inputs = torch.cat((fu, fa, fy), 1)\n else:\n f_turn_inputs = torch.cat((fu, fa), 1)\n\n # turn encodings [batch_size, hidden_dim]\n # and RNN hidden state [batch_size, hidden_dim_rnn]\n f_turn, hidden = self.turn_history_rnn(f_turn_inputs, hidden)\n\n # keep track of number of loss updates for later averaging\n loss_updates = torch.Tensor([0]).to(self.device)\n\n # iterate over slots and values, compute probabilities\n for slot_id in sorted(slots2values.keys()):\n slot = slots2values[slot_id]\n # compute encoding of inputs as described in StateNet paper, Sec. 2\n fs = self.slot_encoder(slot.embedding)\n # encoding of slot with turns in batch: [batch_size, hidden_dim]\n f_slot_turn = F.mul(fs, f_turn)\n\n # get binary prediction for slot presence {slot_id: [batch_size, 1]}\n binary_filling_probs[slot_id] = torch.sigmoid(\n self.slot_fill_indicator(f_slot_turn))\n\n # get probability distribution over values...\n values = slot.values\n for t, turn in enumerate(batch):\n probs[slot_id].append(None)\n if binary_filling_probs[slot_id][t] > 0.5:\n probs[slot_id][t] = torch.zeros(len(values))\n for v, value in enumerate(values):\n venc = self.value_encoder(value.embedding)\n # by computing 2-Norm distance following paper, Sec. 
2.6\n probs[slot_id][t][v] = -torch.dist(f_slot_turn, venc)\n\n # softmax it!\n probs[slot_id][t] = F.softmax(probs[slot_id][t], 0)\n\n loss = torch.Tensor([0]).to(self.device)\n if self.training:\n for slot_id in slots2values.keys():\n\n # loss for binary slot presence\n # gold: 1 if slot in turn.labels (meaning it's filled), else 0\n # [batch_size, 1]\n if binary_filling_probs[slot_id] is not None:\n gold_slot_filling = torch.Tensor(\n [float(slot_id in turn.labels) for turn in batch]\n ).view(-1, 1).to(self.device)\n loss += self.args.eta * F.binary_cross_entropy(\n binary_filling_probs[slot_id],\n gold_slot_filling).to(self.device)\n loss_updates += 1\n\n for t, turn in enumerate(batch):\n # loss for slot-value pairing, if slot is present\n if slot_id in turn.labels and \\\n binary_filling_probs[slot_id][t] > 0.5:\n loss += F.binary_cross_entropy(\n probs[slot_id][t],\n turn.labels[slot_id]\n ).to(self.device)\n loss_updates += 1\n\n loss = loss / loss_updates\n mean_slots_filled = len(probs) / len(slots2values)\n return loss, probs, hidden, mean_slots_filled\n\n def forward(self, dialogs, slots2values):\n batch_size = len(dialogs)\n hidden = torch.zeros(1, batch_size, self.hidden_dim).to(self.device)\n global_probs = [{} for _ in range(batch_size)]\n global_loss = torch.Tensor([0]).to(self.device)\n per_turn_mean_slots_filled = []\n ys_turn = [[] for _ in range(batch_size)]\n scores = [defaultdict(list) for _ in range(batch_size)]\n\n batch_turns_first, mask = util.turns_first(dialogs)\n max_turns = max(mask)\n # turns is list of t-th turns in current batch\n for t, turns in enumerate(batch_turns_first):\n loss, turn_probs, hidden, mean_slots_filled = \\\n self.forward_turn(turns, slots2values, hidden)\n global_loss += loss\n turn_probs = util.invert_slot_turns(turn_probs, batch_size)\n turn_preds = [{} for _ in range(batch_size)]\n for batch_item in range(batch_size):\n if t < mask[batch_item]:\n for slot_id, slot in slots2values.items():\n if turn_probs[batch_item][slot_id] is not None:\n\n global_probs[batch_item][slot_id] = \\\n torch.zeros(len(slot.values))\n argmax = np.argmax(turn_probs[batch_item][slot_id].\n detach().numpy(), 0)\n turn_preds[batch_item][slot_id] = \\\n slots2values[slot_id].values[int(argmax)].value\n for v, value in enumerate(slot.values):\n global_probs[batch_item][slot_id][v] = max(\n global_probs[batch_item][slot_id][v],\n turn_probs[batch_item][slot_id][v])\n scores[batch_item][slot_id].append(\n turn_probs[batch_item][slot_id])\n\n ys_turn[batch_item].append(turn_preds[batch_item])\n\n ys = [{} for _ in range(batch_size)]\n for batch_item in range(batch_size):\n for slot, probs in global_probs[batch_item].items():\n score, argmax = probs.max(0)\n ys[batch_item][slot] = slots2values[slot].values[\n int(argmax)].value\n\n global_loss = global_loss / max_turns\n if per_turn_mean_slots_filled:\n dialog_mean_slots_filled = np.mean(per_turn_mean_slots_filled)\n else:\n dialog_mean_slots_filled = 0.0\n return ys, ys_turn, scores, global_loss, dialog_mean_slots_filled\n\n def run_train(self, dialogs_train, dialogs_dev, s2v, args,\n early_stopping=None):\n track = defaultdict(list)\n if self.optimizer is None:\n self.set_optimizer()\n self.logger.info(\"Starting training...\")\n if torch.cuda.is_available() and self.device.type == 'cuda':\n s2v = util.s2v_to_device(s2v, self.device)\n best = {}\n iteration = 0\n no_improvements_for = 0\n for epoch in range(1, args.epochs+1):\n global_mean_slots_filled = []\n # logger.info('starting epoch {}'.format(epoch))\n\n 
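# NOTE: 'epochs_trained' may be missing on models restored from older\n            # checkpoints, so it is initialized lazily before counting this epoch.\n            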
if not hasattr(self, \"epochs_trained\"):\n                self.set_epochs_trained(0)\n            self.epochs_trained += 1\n\n            # train and update parameters\n            self.train()\n            train_predictions = []\n            s = []\n            for batch in tqdm(list(util.make_batches(dialogs_train,\n                                                     args.batch_size))):\n                iteration += 1\n                self.zero_grad()\n                predictions, turn_predictions, scores, loss, mean_slots_filled = \\\n                    self.forward(batch, s2v)\n                for i in range(len(batch)):\n                    train_predictions.append((predictions[i],\n                                              turn_predictions[i]))\n                # print(turn_predictions)\n                global_mean_slots_filled.append(mean_slots_filled)\n                loss.backward()\n                self.optimizer.step()\n                track['loss'].append(loss.item())\n\n            # evaluate on train and dev\n            summary = {'iteration': iteration, 'epoch': self.epochs_trained}\n            for k, v in track.items():\n                summary[k] = sum(v) / len(v)\n            self.logger.info(\"Evaluating...\")\n            predictions, turn_predictions = zip(*train_predictions)\n            summary.update({'eval_train_{}'.format(k): v for k, v in\n                            evaluate_preds(dialogs_train, predictions,\n                                           turn_predictions, args.eval_domains\n                                           ).items()})\n            summary.update({'eval_dev_{}'.format(k): v for k, v in\n                            self.run_eval(dialogs_dev, s2v, args.eval_domains,\n                                          self.args.dout +\n                                          \"/prediction_dv_{}.json\".format(epoch)\n                                          ).items()})\n\n            #global_mean_slots_filled = np.mean(global_mean_slots_filled)\n            #self.logger.info(\"Predicted {}% slots as present\".format(\n            #    global_mean_slots_filled*100))\n            self.logger.info(\"Epoch summary: \" + str(summary))\n\n            # do early stopping saves\n            stop_key = 'eval_dev_{}'.format(args.stop)\n            train_key = 'eval_train_{}'.format(args.stop)\n            if best.get(stop_key, 0) <= summary[stop_key]:\n                no_improvements_for = 0\n                best_dev = '{:f}'.format(summary[stop_key])\n                best_train = '{:f}'.format(summary[train_key])\n                best.update(summary)\n                self.save(best,\n                          identifier='epoch={epoch},iter={iteration},'\n                                     'train_{key}={train},dev_{key}={dev}'\n                                     ''.format(\n                                        epoch=self.epochs_trained,\n                                        iteration=iteration,\n                                        train=best_train, dev=best_dev,\n                                        key=args.stop)\n                          )\n                self.prune_saves()\n            else:\n                no_improvements_for += 1\n                if no_improvements_for > args.patience:\n                    self.logger.info(\"Ending training after model did not \"\n                                     \"improve for {} epochs\".format(\n                        no_improvements_for))\n                    break\n                else:\n                    self.logger.info(\"Model did not improve for {} epochs. 
\"\n                                     \"Patience is {} epochs.\".format(\n                            no_improvements_for, args.patience))\n\n            summary.update({'best_{}'.format(k): v for k, v in best.items()})\n            self.logger.info(pformat(summary))\n            track.clear()\n\n    def run_train_reinforce(self, dialogs_train, dialogs_dev, s2v, args, baseline=None):\n        nn.utils.clip_grad_norm_(self.parameters(), 5)\n        track = defaultdict(list)\n        if self.optimizer is None:\n            self.set_optimizer()\n        self.logger.info(\"Starting reinforcement training...\")\n        if torch.cuda.is_available() and self.device.type == 'cuda':\n            s2v = util.s2v_to_device(s2v, self.device)\n        # Lower learning rate for reinforcement training\n        if args.resume:\n            lr_rl = args.lr * 0.1\n            print('REINFORCE lr: {}'.format(lr_rl))\n            self.optimizer = optim.Adam(self.parameters(), lr=lr_rl)\n\n        best = {}\n        iteration = 0\n        no_improvements_for = 0\n        epoch_rewards = []\n        epoch_jg = []\n\n        best_reward = 0\n        hill_climb_patience = 0\n        for epoch in range(1, args.epochs + 1):\n            global_mean_slots_filled = []\n            # logger.info('starting epoch {}'.format(epoch))\n\n            if not hasattr(self, \"epochs_trained\"):\n                self.set_epochs_trained(0)\n            self.epochs_trained += 1\n\n            # train and update parameters\n            self.train()\n            train_predictions = []\n            for batch in tqdm(list(util.make_batches(dialogs_train,\n                                                     args.batch_size))):\n                batch_rewards = []\n                batch_base_rewards = []\n                batch_scores = []\n                entropies = []\n                batch_losses = []\n                iteration += 1\n                self.zero_grad()\n\n                predictions, turn_predictions, scores, loss, mean_slots_filled = \\\n                    self.forward(batch, s2v)\n\n                eval_scores = evaluate_preds(batch, predictions, turn_predictions,\n                                             args.eval_domains)\n\n                reward = get_reward(eval_scores)\n                batch_reward = reward\n                #scale = (-5, 5)\n                #batch_reward = shape_reward(reward, scale_out=scale)\n\n                # Eval on dev set and roll back if performance has gone down too much\n                if iteration % 5 == 0:\n                    # Dev predictions\n                    dev_rew = self.run_eval(dialogs_dev, s2v, args.eval_domains, None)['dialog_reward']\n                    self.train()\n\n                    if dev_rew > best_reward:\n                        print('Current best rew:', dev_rew)\n                        best_reward = dev_rew\n                        self.save('best-rl', 'best-rl')\n                    elif hill_climb_patience == 15:\n                        print('Patience reached, rolling back to previous best')\n                        fname = self.args.dout + '/best-rl.t7'\n                        self.load(fname)\n                        #self.optimizer.state['epoch'] = self.epochs_trained\n                        hill_climb_patience = 0\n\n                        # Get new predictions/reward\n                        predictions, turn_predictions, scores, loss, mean_slots_filled = \\\n                            self.forward(batch, s2v)\n                        eval_scores = evaluate_preds(batch, predictions, turn_predictions,\n                                                     args.eval_domains)\n                        reward = get_reward(eval_scores)\n                        batch_reward = reward\n                else:\n                    hill_climb_patience += 1\n\n                base_reward = None\n                if baseline:\n                    base_preds, base_turn_preds, _, _, _ = \\\n                        baseline.forward(batch, s2v)\n                    base_eval_scores = evaluate_preds(batch, base_preds, base_turn_preds,\n                                                      args.eval_domains)\n                    b_reward = get_reward(base_eval_scores)\n                    # Fiddle around with baseline reward scaling\n                    base_reward = b_reward #shape_reward(b_reward, scale_out=scale)\n\n                \"\"\"\n                for batch_item, slot2score in enumerate(scores):\n                    for slot, score in slot2score.items():\n                        for t in range(len(score)):\n                            slot_turn_scores = F.softmax(scores[batch_item][slot][t])\n                            m = Categorical(slot_turn_scores)\n                            slot_turn_prediction = m.sample()\n                            slot_turn_prediction_log_prob = m.log_prob(\n                                slot_turn_prediction)\n                            # Entropy\n                            entropy = (slot_turn_prediction_log_prob * torch.exp(slot_turn_prediction_log_prob))\n                            entropies.append(entropy)\n                            # credit assignment: copy dialog-level reward to 
each\n                            # slot/turn\n                            batch_rewards.append(batch_reward)\n                            batch_scores.append(slot_turn_prediction_log_prob)\n                \"\"\"\n                for batch_item, slot2score in enumerate(scores):\n                    # Rewards, log probs and entropy for s-v pairs in each turn\n                    rews = []\n                    brews = []\n                    prbs = []\n                    ents = []\n                    for slot, score in slot2score.items():\n                        for t in range(len(score)):\n                            # Sample and compute log prob for slot predictions for turn\n                            slot_turn_scores = scores[batch_item][slot][t] #F.softmax(scores[batch_item][slot][t])\n                            m = Categorical(slot_turn_scores)\n                            slot_turn_prediction = m.sample()\n                            slot_turn_prediction_log_prob = m.log_prob(\n                                slot_turn_prediction)\n                            # TODO take argmax with prob 1-eps and sample with prob eps\n\n                            # Entropy\n                            entropy = (slot_turn_prediction_log_prob * torch.exp(slot_turn_prediction_log_prob))\n                            # credit assignment: copy dialog-level reward to each\n                            # slot/turn for discounted reward\n                            ents.append(entropy)\n                            rews.append(batch_reward)\n                            brews.append(base_reward)\n                            prbs.append(slot_turn_prediction_log_prob)\n\n                            #entropies.append(entropy)\n                            #batch_rewards.append(batch_reward)\n                            #batch_base_rewards.append(base_reward)\n                            #batch_scores.append(slot_turn_prediction_log_prob)\n\n                    # Compute losses for batch item\n                    bi_loss = self.reinforce_loss(rews, prbs, brews, ents, self.args.gamma)\n                    batch_losses.append(bi_loss)\n\n                #print(len(scores))\n                global_mean_slots_filled.append(mean_slots_filled)\n                track['loss'].append(loss.item())\n\n                #if batch_rewards:\n                #    self.reinforce_update(batch_rewards, batch_scores,\n                #                          self.args.gamma, batch_base_rewards, entropies)\n\n                if batch_losses:\n                    self.reinforce_update_losses(batch_losses)\n\n                #if iteration % 10 == 0:\n                #    ev = self.run_eval(dialogs_dev, s2v, args.eval_domains, None)\n                #    self.train()\n                #    epoch_rewards.append(ev['dialog_reward'])\n                #    epoch_jg.append(ev['joint_goal'])\n                #    print('JG: ', ev['joint_goal'], 'BS:', ev['belief_state'], 'DR:', ev['dialog_reward'])\n                # Save for train predictions for evaluation\n\n                for i in range(len(batch)):\n                    train_predictions.append((predictions[i],\n                                              turn_predictions[i]))\n\n            #print(epoch_rewards)\n            #print(epoch_jg)\n            # evaluate on train and dev\n            summary = {'iteration': iteration, 'epoch': self.epochs_trained}\n            for k, v in track.items():\n                summary[k] = sum(v) / len(v)\n            self.logger.info(\"Evaluating...\")\n            predictions, turn_predictions = zip(*train_predictions)\n            summary.update({'eval_train_{}'.format(k): v for k, v in\n                            evaluate_preds(dialogs_train, predictions,\n                                           turn_predictions, args.eval_domains\n                                           ).items()})\n            summary.update({'eval_dev_{}'.format(k): v for k, v in\n                            self.run_eval(dialogs_dev, s2v, args.eval_domains,\n                                          self.args.dout +\n                                          \"/prediction_dv_{}_{}.json\".\n                                          format(epoch, str(args.eval_domains))\n                                          ).items()})\n\n            global_mean_slots_filled = np.mean(global_mean_slots_filled)\n            #self.logger.info(\"Predicted {}% slots as present\".format(\n            #    global_mean_slots_filled * 100))\n            #self.logger.info(\"Epoch summary: \" + str(summary))\n\n            # do early stopping saves\n            stop_key = 'eval_dev_{}'.format(args.stop)\n            train_key = 'eval_train_{}'.format(args.stop)\n            if best.get(stop_key, 0) <= summary[stop_key]:\n                no_improvements_for = 0\n                best_dev = '{:f}'.format(summary[stop_key])\n                best_train = '{:f}'.format(summary[train_key])\n                best.update(summary)\n                self.save(best,\n                          identifier='epoch={epoch},iter={iteration},'\n                                     'train_{key}={train},dev_{key}={dev}'\n                                     ''.\n                          format(epoch=self.epochs_trained,\n                                 iteration=iteration, train=best_train,\n                                 dev=best_dev, key=args.stop))\n                self.prune_saves()\n            else:\n                no_improvements_for += 1\n                
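# early stopping: give up once the dev metric has stagnated for more\n                # than args.patience consecutive epochs.\n                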
if no_improvements_for > args.patience:\n                    self.logger.info(\"Ending training after model did not \"\n                                     \"improve for {} epochs\".format(\n                                         no_improvements_for))\n                    break\n\n            summary.update({'best_{}'.format(k): v for k, v in best.items()})\n            self.logger.info(pformat(summary))\n            track.clear()\n\n    def discount_rewards(self, rewards, gamma):\n        \"\"\"\n        Compute discounted reward\n        \"\"\"\n        R = 0\n        rews = []\n        for r in rewards[::-1]:\n            R = r + gamma * R\n            rews.insert(0, R)\n        rews = torch.FloatTensor(rews)\n        return rews\n\n    def reinforce_update(self, batch_rewards, log_probs, gamma, base_reward, entropies):\n        policy_loss = []\n        beta = 0.05  # Entropy weight\n\n        # Compute discounted rewards\n        rewards = self.discount_rewards(batch_rewards, gamma)\n        base_rewards = self.discount_rewards(base_reward, gamma)\n\n        #rewards = (rewards - rewards.mean()) / (rewards.std() + eps)\n        #base_rewards = (base_rewards - base_rewards.mean()) / (base_rewards.std() + eps)\n        #print(rewards.mean())\n        #print(base_rewards.mean())\n        for log_prob, reward, base, entropy in zip(log_probs, rewards, base_rewards, entropies):\n            if np.isnan(reward):\n                reward = 0\n            if np.isnan(base):\n                base = 0\n            policy_loss.append(-log_prob * (reward - base) + entropy * beta)\n            #policy_loss.append(-log_prob * reward)\n            # policy_loss.append(-log_prob * (reward+0.001))\n            # local_policy_loss = -log_prob * (reward+0.001)\n            # local_policy_loss.backward()\n            # policy_loss.backward()\n            # policy_loss.append(reward)\n        self.optimizer.zero_grad()\n        policy_loss = torch.stack(policy_loss).sum() #/ len(log_probs)\n        #print(\"-- POLICY LOSS --\", policy_loss, type(policy_loss))\n        # policy_loss = torch.autograd.Variable(policy_loss)\n        #print(policy_loss)\n        # policy_loss.backward(retain_graph=True)\n        try:\n            policy_loss.backward()\n            #for p in self.parameters():\n            #    p.data.add_(-self.args.lr, p.grad.data)\n            self.optimizer.step()\n        except RuntimeError:\n            print(\"WARNING! couldn't update with policy loss:\", policy_loss)\n\n\n    def reinforce_loss(self, bi_rewards, log_probs, base_reward, entropies, gamma):\n        \"\"\"\n        Calculate the loss for a batch item using discounted future rewards, entropy\n        and the advantage for variance reduction\n        \"\"\"\n        if len(bi_rewards) == len(log_probs) == len(entropies) == 0:\n            return torch.tensor(0., requires_grad=True).sum()\n\n        policy_loss = []\n        beta = 0.05\n\n        # TODO Binary reward better/worse than base\n\n        # Discounted reward\n        #rewards = bi_rewards\n        #base_rewards = base_reward\n        rewards = self.discount_rewards(bi_rewards, gamma)\n        base_rewards = self.discount_rewards(base_reward, gamma)\n\n        # Centering\n        #rewards = (rewards - rewards.mean()) / (rewards.std() + eps)\n        #base_rewards = (base_rewards - base_rewards.mean()) / (base_rewards.std() + eps)\n\n        # Compute accumulated dialog loss\n        for log_prob, reward, base, entropy in zip(log_probs, rewards, base_rewards, entropies):\n            if np.isnan(reward):\n                reward = 0\n            if np.isnan(base):\n                base = 0\n            # Calculate 'turn' loss\n            advantage = reward - base\n            policy_loss.append(-log_prob * advantage + entropy * beta)\n\n        # Return dialog loss\n        return torch.stack(policy_loss).sum()\n\n    def reinforce_update_losses(self, batch_losses):\n        \"\"\"\n        Update the policy from computed batch losses\n        \"\"\"\n\n        self.optimizer.zero_grad()\n        policy_loss = torch.stack(batch_losses).sum()\n\n        #try:\n        policy_loss.backward()\n        #for p in self.parameters():\n        #    p.data.add_(-self.args.lr, p.grad.data)\n        self.optimizer.step()\n        #except RuntimeError:\n        #    print(\"WARNING! 
couldn't update with policy loss:\", policy_loss)\n\n    def run_pred(self, dialogs, s2v):\n        self.eval()\n        predictions_d, turn_predictions, _, _, _ = self.forward(dialogs, s2v)\n        return predictions_d, turn_predictions\n\n    def run_eval(self, dialogs, s2v, eval_domains, outfile):\n        if torch.cuda.is_available() and self.device.type == 'cuda':\n            s2v = util.s2v_to_device(s2v, self.device)\n        predictions, turn_predictions = self.run_pred(dialogs, s2v)\n        return evaluate_preds(dialogs, predictions, turn_predictions,\n                              eval_domains, outfile)\n\n    def save(self, summary, identifier):\n        fname = '{}/{}.t7'.format(self.args.dout, identifier)\n        logging.info('saving model to {}'.format(fname))\n        state = {\n            'args': vars(self.args),\n            'model': self.state_dict(),\n            'summary': summary,\n            'optimizer': self.optimizer.state_dict(),\n            'epoch': self.epochs_trained\n        }\n        torch.save(state, fname)\n\n    def load_rl_model(self, revert=True):\n        self.logger.info('Reverting to previous best model')\n        fname = '{}/best-rl.t7'.format(self.args.dout)\n        state = torch.load(fname, map_location=lambda storage, loc: storage)\n        self.load_state_dict(state['model'])\n        self.set_optimizer()\n        resume_from_epoch = state.get('epoch', 0)\n        self.set_epochs_trained(resume_from_epoch)\n        for state in self.optimizer.state.values():\n            for k, v in state.items():\n                if isinstance(v, torch.Tensor):\n                    state[k] = v.to(self.device)\n\n    def load(self, path):\n        self.logger.info('loading model from {}'.format(path))\n        #state = torch.load(path)\n        state = torch.load(path, map_location=lambda storage, loc: storage)\n        self.load_state_dict(state['model'])\n        self.set_optimizer()\n        self.optimizer.load_state_dict(state['optimizer'])\n        resume_from_epoch = state.get('epoch', 0)\n        self.set_epochs_trained(resume_from_epoch)\n        self.logger.info(\"Resuming from epoch {}\".format(resume_from_epoch))\n        for state in self.optimizer.state.values():\n            for k, v in state.items():\n                if isinstance(v, torch.Tensor):\n                    state[k] = v.to(self.device)\n\n    def prune_saves(self, n_keep=5):\n        scores_and_files = self.get_saves()\n        if len(scores_and_files) > n_keep:\n            for score, fname in scores_and_files[n_keep:]:\n                os.remove(fname)\n\n    def load_best_save(self, directory):\n        if directory is None:\n            directory = self.args.dout\n\n        scores_and_files = self.get_saves(directory=directory)\n        print(scores_and_files)\n        if scores_and_files:\n            assert scores_and_files, 'no saves exist at {}'.format(directory)\n            score, fname = scores_and_files[0]\n            self.load(fname)\n\n    def get_saves(self, directory=None):\n        if directory is None:\n            directory = self.args.dout\n        files = [f for f in os.listdir(directory) if f.endswith('.t7')]\n        scores = []\n        for fname in files:\n            re_str = r'dev_{}=([0-9\\.]+)'.format(self.args.stop)\n            dev_acc = re.findall(re_str, fname)\n            if dev_acc:\n                score = float(dev_acc[0].strip('.'))\n                scores.append((score, os.path.join(directory, fname)))\n        if not scores:\n            raise Exception('No files found!')\n        scores.sort(key=lambda tup: tup[0], reverse=True)\n        return scores\n\n    def get_device(self, device_id):\n        if device_id is not None and torch.cuda.is_available():\n            num_gpus = torch.cuda.device_count()\n            gpu = device_id % num_gpus\n            return torch.device('cuda:{}'.format(gpu))\n        else:\n            return torch.device('cpu')\n\n" }, { "alpha_fraction": 0.6387645602226257, "alphanum_fraction": 0.6584601402282715, "avg_line_length": 35.032257080078125, "blob_id": "a68079b3fbca8a89c13f9ee4f0af582ab12b09c8", "content_id": "79fa1a1c97ed7576df02cd0bfa7c9ba1858c26a5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2234, "license_type": "no_license", "max_line_length": 104, "num_lines": 62, "path": "/util/elmo_data.py", "repo_name": "coastalcph/dialog-rl", "src_encoding": "UTF-8", "text": "from tqdm import tqdm\nfrom util import util\nfrom allennlp.commands.elmo import ElmoEmbedder\nfrom util.featurize import ElmoFeaturizer\nfrom collections import namedtuple\nimport pickle\n\n\nElmo = namedtuple('Elmo', ['utterance_feat', 'sys_act_feat'])\nDELEX = False\n\ndef main():\n    # Init ELMO model\n    elmo_emb = ElmoEmbedder(weight_file='res/elmo/elmo_2x1024_128_2048cnn_1xhighway_weights.hdf5',\n                            options_file='res/elmo/elmo_2x1024_128_2048cnn_1xhighway_options.json')\n\n    # \"Warm up\" ELMo embedder (https://github.com/allenai/allennlp/blob/master/tutorials/how_to/elmo.md)\n    warmup_data, _, _, _ = util.load_dataset(splits=['train'], base_path='../data/multiwoz/delex/')\n    warmup_data = [dg.to_dict() for dg in warmup_data['train'].iter_dialogs()][:500]\n\n    print('Warming up ELMo embedder on train dialogs')\n    for d in tqdm(warmup_data):\n        utts = []\n        for t in d['turns']:\n            utts.append(t['transcript'])\n        _ = elmo_emb.batch_to_embeddings(utts)\n\n    base_path = '../data/multiwoz/delex/'\n    splits = ['train', 'test', 'dev']\n    #splits = ['dev']\n\n    # Load dialogs\n    print('Creating elmo embeddings for annotated data')\n    utterance_featurizer = ElmoFeaturizer(elmo_emb, 'utterance')\n    sys_act_featurizer = ElmoFeaturizer(elmo_emb, 'act')\n\n    elmo = Elmo(utterance_featurizer, sys_act_featurizer)\n\n    dia_data, ontology = util.generate_dataset_elmo(elmo, splits=splits, base_path=base_path)\n\n    # Save dataset\n    for split in splits:\n        pickle.dump(dia_data[split], open('{}_elmo_full.pkl'.format(base_path + split), 'wb'))\n        # Workaround for s2v featurization\n        dia_data[split] = [dg.to_dict() for dg in dia_data[split].iter_dialogs()]\n\n    ## Create s2v embedding\n    s2v = ontology.values\n    if DELEX:\n        s2v = util.delexicalize(s2v)\n    s2v = util.fix_s2v(s2v, dia_data, splits=splits)\n\n    slot_featurizer = ElmoFeaturizer(elmo_emb, \"slot\")\n    value_featurizer = ElmoFeaturizer(elmo_emb, \"value\")\n\n    s2v = util.featurize_s2v(s2v, slot_featurizer, value_featurizer, elmo=True, elmo_pool=False)\n\n    # Save s2v\n    pickle.dump(s2v, open('{}_elmo_full.pkl'.format(base_path + 's2v'), 'wb'))\n\nif __name__ == '__main__':\n    main()\n" }, { "alpha_fraction": 0.693965494632721, "alphanum_fraction": 0.7284482717514038, "avg_line_length": 49.64285659790039, "blob_id": "64c7be1e11b1d8b677f5c40fa0e780ac93ce452c", "content_id": "8fe6c126badf4845308ed78523d58f1a9024b79a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 696, "license_type": "no_license", "max_line_length": 188, "num_lines": 14, "path": "/scripts/run_job.sh", "repo_name": "coastalcph/dialog-rl", "src_encoding": "UTF-8", "text": "#!/bin/bash\n# normal cpu stuff: allocate cpus, memory\n#SBATCH --ntasks=1 --cpus-per-task=10 --mem=6000M\n# we run on the gpu partition and we allocate 2 titanx gpus\n#SBATCH -p gpu --gres=gpu:2\n#We expect that our program should not run longer than 4 hours\n#Note that a program will be killed once it exceeds this time!\n#SBATCH --time=12:00:00\n\n #your script, in this case: write the hostname and the ids of the chosen gpus.\n hostname\n echo $CUDA_VISIBLE_DEVICES\n source /home/vtx829/.env/bin/activate\n python -m test.test_statenet --train_domains taxi --eval_domains taxi --epochs 100 --train_strict --gpu 1 --elmo --pooled -n test_slurm_taxi > test_slurm_taxi.log 2> test_slurm_taxi.err\n\n" }, { "alpha_fraction": 0.6178790330886841, "alphanum_fraction": 0.6511831879615784, "avg_line_length": 36.96666717529297, "blob_id": "803d0c0c4ea230a9d110861619979e389697fd57", "content_id": "42241d9b47f6e4cdf699eecaceff62392085aa7d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 1141, "license_type": "no_license", "max_line_length": 272, "num_lines": 30, "path": "/scripts/run_pretrain.sh", "repo_name": "coastalcph/dialog-rl", "src_encoding": "UTF-8", "text": "#!/bin/bash\n# normal cpu stuff: allocate cpus, memory\n#SBATCH --ntasks=1 --cpus-per-task=10 --mem=6000M\n# we run on the gpu partition and we allocate 2 titanx gpus\n#SBATCH -p gpu --gres=gpu:2\n# We expect that our program should not run longer than 4 hours\n# Note that a program will be killed once it exceeds this time!\n#SBATCH --time=120:00:00\n \n#your script, in this case: write the hostname and the ids of the chosen gpus.\nhostname\necho $CUDA_VISIBLE_DEVICES\nsource /home/vtx829/.env/bin/activate\n\nDOMAINS=(taxi hotel restaurant attraction train);\n#DOMAINS=(attraction);\n#DOMAINS=(hotel);\ngpu=1\n\nfor domain in ${DOMAINS[@]};\ndo\n    pretrain=(${DOMAINS[@]//*$domain*}); # all but $domain\n    echo \"==============================\"\n    date;\n    echo \"Target domain: \" $domain;\n    echo \" Pretrain:\" ${pretrain[@]};\n    python -m run --train_domains ${pretrain[@]} --eval_domains $pretrain --lr 0.00003 --epochs 1000 --gpu $gpu -n p/pretrain-$domain --max_dev_dialogs 150 --delexicalize_labels --elmo --pooled --batch_size 16 --patience 10 --train_strict &> logs/pretrain-$domain.log \n\n    ((gpu++)) ;\ndone\n\n\n" }, { "alpha_fraction": 0.6981678009033203, "alphanum_fraction": 0.7232401371002197, "avg_line_length": 25.924999237060547, "blob_id": "5f6e168e7d3512e2f509737702b71b39d9ba366a", "content_id": "213a5e5226307785e3497bd46dffd7b3f5887136", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1037, "license_type": "no_license", "max_line_length": 144, "num_lines": 40, "path": "/README.md", "repo_name": "coastalcph/dialog-rl", "src_encoding": "UTF-8", "text": "# Domain Transfer in Dialogue Systems without Turn-Level Supervision\n \nThis is the code repository for our paper [Domain Transfer in Dialogue Systems without Turn-Level Supervision](http://arxiv.org/abs/1909.07101).\n\n## Run it\n\n##### Install dependencies\n```\npip install -r requirements.txt\n```\n\n##### Run training script\n```\npython run.py\n```\nThis script gives you plenty of options for arguments \ncontrolling training, and it's also where you specify what \ndata you'll work on. Please run `python run.py -h` for \nan overview of options. \n\n##### Evaluating \nIf you just want to evaluate a trained model and produce \npredictions, you do (typically adding `--resume <save_dir>` so a trained checkpoint is loaded):\n```\npython run.py --test\n```\n \n \n## Cite\n \nIf you use this code, please cite the paper. 
Bibtex:\n```\n@article{bingel2019domain,\n title={Domain Transfer in Dialogue Systems without Turn-Level Supervision},\n author={Bingel, Joachim and Petr\\'en Bach Hansen, Victor and Gonzalez, Ana Valeria and Budzianowski, Pawe{\\l} and Augenstein, Isabelle and S{\\o}gaard, Anders},\n journal={arXiv preprint arXiv:1909.07101},\n year={2019}\n}\n\n```\n" }, { "alpha_fraction": 0.5608993768692017, "alphanum_fraction": 0.5666465163230896, "avg_line_length": 30.788461685180664, "blob_id": "80b7d5568e2e954c61c442fd3c751500cf791e31", "content_id": "76ec89ac7ae644937dc27b96bf72e0e004f010b4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9918, "license_type": "no_license", "max_line_length": 105, "num_lines": 312, "path": "/util/util.py", "repo_name": "coastalcph/dialog-rl", "src_encoding": "UTF-8", "text": "import json\nimport logging\nimport os\nimport torch\nimport pickle\nfrom pprint import pformat, pprint\nfrom importlib import import_module\nfrom vocab import Vocab\nfrom util.dataset import Dataset, Ontology\nfrom util.preprocess_data import dann\nfrom tqdm import tqdm\nfrom util.data import *\nimport argparse\n\n\ndef load_dataset(splits=('train', 'dev', 'test'), domains='all', strict=False,\n base_path=None, elmo=False):\n \"\"\"\n\n :param splits:\n :param domains: filter for domains (if 'all', use all available)\n :param strict: if True, select only dialogs that contain only a single domain\n :return:\n \"\"\"\n path = base_path if base_path else dann\n # TODO implement filtering with `domains` and `strict`\n with open(os.path.join(path, 'ontology.json')) as f:\n ontology = Ontology.from_dict(json.load(f))\n with open(os.path.join(path, 'vocab.json')) as f:\n vocab = Vocab.from_dict(json.load(f))\n with open(os.path.join(path, 'emb.json')) as f:\n E = json.load(f)\n\n w2v = {w: E[i] for i, w in enumerate(vocab.to_dict()['index2word'])}\n\n dataset = {}\n for split in splits:\n with open(os.path.join(path, '{}.json'.format(split))) as f:\n logging.warn('loading split {}'.format(split))\n dataset[split] = Dataset.from_dict(json.load(f))\n\n logging.info('dataset sizes: {}'.format(pformat({k: len(v) for k, v in dataset.items()})))\n return dataset, ontology, vocab, w2v\n\n\ndef load_dataset_elmo(splits=('train', 'dev', 'test'), base_path=None):\n \"\"\"\n \"\"\"\n logging.info('Loading ELMo featurized data')\n\n path = base_path if base_path else dann\n\n dataset = {}\n for split in splits:\n with open(os.path.join(path, '{}_elmo.pkl'.format(split)), 'rb') as f:\n logging.warn('loading split {}'.format(split))\n data = pickle.load(f)\n dataset[split] = data\n\n s2v = pickle.load(open(os.path.join(path, 's2v_elmo.pkl'), 'rb'))\n\n logging.info('dataset sizes: {}'.format(pformat({k: len(v) for k, v in dataset.items()})))\n return dataset, s2v\n\n\n\ndef generate_dataset_elmo(elmo, splits=('train', 'dev', 'test'), domains='all', strict=False,\n base_path=None):\n \"\"\"\n \"\"\"\n path = base_path if base_path else ''\n with open(os.path.join(path, 'ontology.json')) as f:\n ontology = Ontology.from_dict(json.load(f))\n\n dataset = {}\n for split in splits:\n with open(os.path.join(path, '{}.json'.format(split))) as f:\n logging.warn('loading split {}'.format(split))\n data = Dataset.from_dict(json.load(f))\n #data.dialogues = data.dialogues[:500]\n data.to_elmo(elmo)\n dataset[split] = data\n\n logging.info('dataset sizes: {}'.format(pformat({k: len(v) for k, v in dataset.items()})))\n return dataset, ontology\n\n\ndef get_models():\n 
return [m.replace('.py', '') for m in os.listdir('models') if not m.startswith('_') and m != 'model']\n\n\ndef load_model(*args, **kwargs):\n    StateNet = import_module(\"models.statenet\").StateNet\n    model = StateNet(*args, **kwargs)\n    logging.info('loaded model.')\n    return model\n\n\ndef split_on_uppercase(s, keep_contiguous=False):\n    \"\"\"\n    From https://stackoverflow.com/questions/2277352/\n\n    Args:\n        s (str): string\n        keep_contiguous (bool): flag to indicate we want to\n                                keep contiguous uppercase chars together\n\n    Returns:\n\n    \"\"\"\n\n    string_length = len(s)\n    is_lower_around = (lambda: s[i-1].islower() or\n                       string_length > (i + 1) and s[i + 1].islower())\n\n    start = 0\n    parts = []\n    for i in range(1, string_length):\n        if s[i].isupper() and (not keep_contiguous or is_lower_around()):\n            parts.append(s[start: i])\n            start = i\n    parts.append(s[start:])\n\n    return parts\n\n\ndef delexicalize(s2v):\n    allowed_slots = [\n        \"attraction-area\",\n        \"attraction-name\",\n        \"attraction-type\",\n        \"hotel-area\",\n        \"hotel-day\",\n        \"hotel-internet\",\n        \"hotel-name\",\n        \"hotel-parking\",\n        \"hotel-people\",\n        \"hotel-pricerange\",\n        \"hotel-stars\",\n        \"hotel-stay\",\n        \"hotel-type\",\n        \"restaurant-area\",\n        \"restaurant-day\",\n        \"restaurant-food\",\n        \"restaurant-name\",\n        \"restaurant-people\",\n        \"restaurant-pricerange\",\n        \"restaurant-time\",\n        \"taxi-arriveBy\",\n        \"taxi-leaveAt\",\n        \"taxi-type\",\n        \"train-arriveBy\",\n        \"train-day\",\n        \"train-leaveAt\",\n        \"train-people\"]\n    out = {}\n    for s, v in s2v.items():\n        if s in allowed_slots:\n            out[s] = v\n        else:\n            out[s] = [\"<true>\"]\n    return out\n\n\ndef str2bool(v):\n    if v.lower() in ('yes', 'true', 't', 'y', '1'):\n        return True\n    elif v.lower() in ('no', 'false', 'f', 'n', '0'):\n        return False\n    else:\n        raise argparse.ArgumentTypeError('Boolean value expected.')\n\n\ndef fix_s2v(_s2v, dialogs, splits=('train', 'dev', 'test')):\n    all_slots = set()\n    s2v_new = {}\n    for s in splits:\n        for d in dialogs[s]:\n            for t in d['turns']:\n                for s, v in t['turn_label']:\n                    all_slots.add(s)\n\n    for s in all_slots:\n        s2v_new[s] = _s2v[s]\n\n    return s2v_new\n\n\ndef featurize_s2v(s2v_dict, slot_featurizer, value_featurizer, elmo=False, elmo_pool=False):\n    out = {}\n    print(\"Featurizing slots and values...\")\n    for s, vs in tqdm(s2v_dict.items()):\n        # remove domain prefix ('restaurant-priceRange' -> 'priceRange')\n        if '-' in s:\n            domain, slot = s.split(\"-\", 1)\n        # split at uppercase to get vectors ('priceRange' -> ['price', 'range'])\n        words = split_on_uppercase(slot, keep_contiguous=True)\n        if elmo:\n            if elmo_pool:\n                # use the pooled embeddings rather than the per-token ones\n                _, slot_emb = slot_featurizer.featurize_turn(words)\n                _, v_embs = value_featurizer.featurize_batch([v.split() for v in vs])\n            else:\n                slot_emb, _ = slot_featurizer.featurize_turn(words)\n                v_embs, _ = value_featurizer.featurize_batch([v.split() for v in vs])\n        else:\n            slot_emb = slot_featurizer.featurize_turn(words)\n            v_embs = value_featurizer.featurize_batch([v.split() for v in vs])\n        vs_out = [Value(v, v_embs[idx], idx)\n                  for idx, v in enumerate(vs)]\n        out[s] = Slot(domain, slot_emb, vs_out)\n    return out\n\n\ndef filter_dialogs(data, domains, strict, max_dialogs, max_turns_per_dialog):\n    out = []\n    for dg in data:\n        if len(dg['turns']) > max_turns_per_dialog > 0:\n            continue\n\n        # # # Check domain constraints # # #\n        # if 'all' in domains, don't worry about anything, else\n        # check how allowed domains and dialog domain intersect\n        if 'all' not in domains:\n            dialog_domains = set(dg['domain'])\n            allowed_domains = 
set(domains)\n\n # strictly restricted to some domain(s), check that\n # dialog has no other domains\n if strict:\n if not dialog_domains.issubset(allowed_domains):\n continue\n # else, check there's at least one valid domain in the dialog\n else:\n if not allowed_domains.intersection(dialog_domains):\n continue\n out.append(dg)\n\n if len(out) == max_dialogs:\n break\n return out\n\n\ndef get_device(device_id):\n if device_id is not None and torch.cuda.is_available():\n num_gpus = torch.cuda.device_count()\n gpu = device_id % num_gpus\n return torch.device('cuda:{}'.format(gpu))\n else:\n return torch.device('cpu')\n\n\ndef make_batches(dialogs, batch_size):\n dialogs = list(dialogs)\n slices = [(i*batch_size, (i+1)*batch_size)\n for i in range(len(dialogs)//batch_size + 1)]\n for beg, end in slices:\n if beg < len(dialogs)-1:\n yield dialogs[beg:end]\n\n\ndef turns_first(dialogs):\n pad_turn = dialogs[0].turns[0]\n x_utt = [pad_turn.x_utt[k] * 0 for k in range(len(pad_turn.x_utt))]\n x_act = pad_turn.x_act * 0\n x_sys = pad_turn.x_sys * 0\n pad_turn = Turn(\" \", \" \", \" \", x_utt, x_act, x_sys, {}, {}, {})\n\n max_turns = max([len(dialog.turns) for dialog in dialogs])\n turns = [[] for _ in range(max_turns)]\n mask = [0 for _ in range(len(dialogs))]\n for d, dialog in enumerate(dialogs):\n for t in range(max_turns):\n if t < len(dialog.turns):\n turns[t].append(dialog.turns[t])\n mask[d] = t+1\n else:\n turns[t].append(pad_turn)\n return turns, mask\n\n\ndef invert_slot_turns(turn_probs, batch_size):\n turn_probs_out = [{} for _ in range(batch_size)]\n for slot_id, turns in turn_probs.items():\n for t, turn in enumerate(turns):\n turn_probs_out[t][slot_id] = turn\n return turn_probs_out\n\n\ndef s2v_to_device(s2v, device):\n out = {}\n for s, vs in s2v.items():\n dom, slot_emb, vs_out = s2v[s]\n vs_out = [Value(v, v_emb.to(device), idx) for v, v_emb, idx in vs_out]\n out[s] = Slot(dom, slot_emb.to(device), vs_out)\n return out\n\n\n# def s2v_to_device(s2v, device):\n# s2v_new = {}\n# for slot_name, slot in s2v.items():\n# slot_emb = torch.cuda.FloatTensor(slot.embedding.to(device))\n# if 'cuda' not in slot_emb.device.type:\n# slot_emb = slot_emb.to(device)\n# vals_new = []\n# for val in slot.values:\n# val_emb = torch.cuda.FloatTensor(val.embedding.to(device))\n# if 'cuda' not in val_emb.device.type:\n# val_emb = val_emb.to(device)\n# vals_new.append(Value(val.value, val_emb, val.idx))\n# s2v_new[slot_name] = Slot(slot.domain, slot_emb, vals_new)\n# return s2v_new\n" }, { "alpha_fraction": 0.5361291766166687, "alphanum_fraction": 0.5373104810714722, "avg_line_length": 34.64210510253906, "blob_id": "39e692c0df75f1682d8e32e136ae99651626c249", "content_id": "87134da92f1c105a9a21ecda8de69927de48d742", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 10158, "license_type": "no_license", "max_line_length": 157, "num_lines": 285, "path": "/util/dataset.py", "repo_name": "coastalcph/dialog-rl", "src_encoding": "UTF-8", "text": "import json\nfrom collections import defaultdict\nimport numpy as np\nfrom tqdm import tqdm\nfrom stanza.nlp.corenlp import CoreNLPClient\nfrom nltk import word_tokenize\nfrom pprint import pprint\n\nclient = None\n\nsys_act_map = {'Dest': 'destination',\n 'Ref': 'reference',\n '=': 'is',\n 'Addr': 'address',\n '?': 'unknown',\n 'a': 'a'}\n\n\n\ndef annotate(sent):\n global client\n if client is None:\n client = CoreNLPClient(default_annotators='ssplit,tokenize'.split(','))\n words = []\n for sent in 
client.annotate(sent).sentences:\n for tok in sent:\n words.append(tok.word)\n return words\n\n\nclass Turn:\n\n def __init__(self, turn_id, transcript, turn_label, belief_state, system_acts, system_transcript, num=None):\n self.id = turn_id\n self.transcript = transcript\n self.turn_label = turn_label\n self.belief_state = belief_state\n self.system_acts = system_acts\n #self.system_transcript = word_tokenize(system_transcript)\n self.system_transcript = system_transcript\n self.num = num or {}\n\n def to_dict(self, elmo=False):\n if elmo:\n return {'turn_id': self.id,\n 'transcript': self.transcript,\n 'turn_label': self.turn_label,\n 'belief_state': self.belief_state,\n 'system_acts': self.system_acts,\n 'system_transcript': self.system_transcript,\n #'usr_trans_elmo': self.usr_trans_elmo,\n 'usr_trans_elmo_pool': self.usr_trans_elmo_pool,\n #'sys_trans_elmo': self.sys_trans_elmo,\n 'sys_trans_elmo_pool': self.sys_trans_elmo_pool,\n #'sys_acts_elmo': self.sys_acts_elmo}\n 'sys_acts_elmo_pool': self.sys_acts_elmo_pool}\n else:\n return {'turn_id': self.id,\n 'transcript': self.transcript,\n 'turn_label': self.turn_label,\n 'belief_state': self.belief_state,\n 'system_acts': self.system_acts,\n 'system_transcript': self.system_transcript}\n #, 'num': self.num}\n\n @classmethod\n def from_dict(cls, d):\n return cls(**d)\n\n @classmethod\n def annotate_raw(cls, raw):\n system_acts = []\n for a in raw['system_acts']:\n if isinstance(a, list):\n s, v = a\n system_acts.append(['inform'] + s.split() + ['='] + v.split())\n else:\n system_acts.append(['request'] + a.split())\n\n # NOTE: fix inconsistencies in data label\n #adjusting the raw turn label to deal with the delexicalization\n adj_label = []\n for l in raw['turn_label']:\n if len(l)>2:\n adj_label.append(l[0:2])\n else:\n adj_label.append(l)\n\n fix = {'centre': 'center', 'areas': 'area', 'phone number': 'number'}\n return cls(\n turn_id=raw['turn_idx'],\n transcript=annotate(raw['transcript']),\n system_acts=system_acts,\n turn_label=[[fix.get(s.strip(), s.strip()), fix.get(v.strip(), v.strip())] for s, v in adj_label],# raw['turn_label']],\n belief_state=raw['belief_state'],\n system_transcript=raw['system_transcript'],\n )\n\n def numericalize_(self, vocab):\n self.num['transcript'] = vocab.word2index(['<sos>'] + [w.lower() for w in self.transcript + ['<eos>']], train=True)\n self.num['system_acts'] = [vocab.word2index(['<sos>'] + [w.lower() for w in a] + ['<eos>'], train=True) for a in self.system_acts + [['<sentinel>']]]\n\n\nclass Dialogue:\n\n def __init__(self, dialogue_id, turns, domain, elmo=None):\n self.id = dialogue_id\n self.turns = turns\n self.domain = domain\n self.elmo = elmo\n\n def __len__(self):\n return len(self.turns)\n\n def to_dict(self):\n return { 'dialogue_id': self.id, 'turns': [t.to_dict(elmo=self.elmo) for t in self.turns],'domain': self.domain}\n\n def to_elmo(self, elmo):\n\n self.elmo = True\n\n utt, sys = elmo\n usr_utts, sys_utts, sys_acts = [], [], []\n for turn in self.turns:\n # Fix system acts to more natural language\n turn.system_acts = [[sys_act_map.get(t, t) for t in tu] for tu in turn.system_acts]\n\n # Batch utterances and system acts\n usr_utts.append(turn.transcript)\n sys_utts.append(turn.system_transcript)\n sys_acts.append(turn.system_acts)\n\n # Featurize\n usr_embs, pooled_usr = utt.featurize_batch(usr_utts)\n sys_embs, pooled_sys = utt.featurize_batch(sys_utts)\n sys_act_embs, sys_act_pooled = sys.featurize_batch(sys_acts)\n\n # Add both list of ELMO embs for each token and a pooled 
one with different key\n        for i, turn in enumerate(self.turns):\n            #setattr(turn, 'usr_trans_elmo', usr_embs[i])\n            setattr(turn, 'usr_trans_elmo_pool', pooled_usr[i])\n            #setattr(turn, 'sys_trans_elmo', sys_embs[i])\n            setattr(turn, 'sys_trans_elmo_pool', pooled_sys[i])\n            #setattr(turn, 'sys_acts_elmo', sys_act_embs[i])\n            setattr(turn, 'sys_acts_elmo_pool', sys_act_pooled[i])\n\n        return self\n\n    @classmethod\n    def from_dict(cls, d):\n        return cls(d['dialogue_id'], [Turn.from_dict(t) for t in d['turns']], d['domain'])\n\n    @classmethod\n    def annotate_raw(cls, raw):\n        return cls(raw['dialogue_idx'], [Turn.annotate_raw(t) for t in raw['dialogue']], raw['domain'])\n\n\nclass Dataset:\n\n    def __init__(self, dialogues):\n        self.dialogues = dialogues\n\n    def __len__(self):\n        return len(self.dialogues)\n\n    def iter_dialogs(self):\n        for d in self.dialogues:\n            yield d\n\n    def iter_turns(self):\n        for d in self.dialogues:\n            for t in d.turns:\n                yield t\n\n    def to_elmo(self, elmo):\n        dialogues_elmo = []\n        for d in tqdm(self.dialogues, desc='Adding ELMo features'):\n            e = d.to_elmo(elmo)\n            dialogues_elmo.append(e)\n        self.dialogues = dialogues_elmo\n\n    def to_dict(self):\n        return {'dialogues': [d.to_dict() for d in self.dialogues]}\n\n    @classmethod\n    def from_dict(cls, d, elmo=None):\n        return cls([Dialogue.from_dict(dd) for dd in tqdm(d['dialogues'])])\n\n    @classmethod\n    def annotate_raw(cls, fname):\n        with open(fname) as f:\n            data = json.load(f)\n            return cls([Dialogue.annotate_raw(d) for d in tqdm(data)])\n\n    def numericalize_(self, vocab):\n        for t in self.iter_turns():\n            t.numericalize_(vocab)\n\n    def extract_ontology(self):\n        slots = set()\n        values = defaultdict(set)\n        for t in self.iter_turns():\n            for s, v in t.turn_label:\n                slots.add(s)\n                values[s].add(v.lower())\n\n        return Ontology(sorted(list(slots)), {k: sorted(list(v)) for k, v in values.items()})\n\n\n    def batch(self, batch_size, shuffle=False, whole_dialogs=False):\n        if whole_dialogs:\n            iter_items = list(self.iter_dialogs())\n        else:\n            iter_items = list(self.iter_turns())\n        if shuffle:\n            np.random.shuffle(iter_items)\n        for i in tqdm(range(0, len(iter_items), batch_size)):\n            yield iter_items[i:i+batch_size]\n\n    def evaluate_preds(self, preds):\n        request = []\n        inform = []\n        joint_goal = []\n        fix = {'centre': 'center', 'areas': 'area', 'phone number': 'number'}\n        i = 0\n        for d in self.dialogues:\n            pred_state = {}\n            for t in d.turns:\n                gold_request = set([(s, v) for s, v in t.turn_label if s == 'request'])\n                gold_inform = set([(s, v) for s, v in t.turn_label if s != 'request'])\n                pred_request = set([(s, v) for s, v in preds[i] if s == 'request'])\n                pred_inform = set([(s, v) for s, v in preds[i] if s != 'request'])\n                request.append(gold_request == pred_request)\n                inform.append(gold_inform == pred_inform)\n\n                gold_recovered = set()\n                pred_recovered = set()\n                for s, v in pred_inform:\n                    pred_state[s] = v\n                for b in t.belief_state:\n                    for s, v in b['slots']:\n                        if b['act'] != 'request':\n                            gold_recovered.add((b['act'], fix.get(s.strip(), s.strip()), fix.get(v.strip(), v.strip())))\n                for s, v in pred_state.items():\n                    pred_recovered.add(('inform', s, v))\n                joint_goal.append(gold_recovered == pred_recovered)\n                i += 1\n        return {'turn_inform': np.mean(inform), 'turn_request': np.mean(request), 'joint_goal': np.mean(joint_goal)}\n\n    def record_preds(self, preds, to_file):\n        data = self.to_dict()\n        i = 0\n        for d in data['dialogues']:\n            for t in d['turns']:\n                t['pred'] = sorted(list(preds[i]))\n                i += 1\n        with open(to_file, 'wt') as f:\n            json.dump(data, f)\n\n\nclass Ontology:\n\n    def 
__init__(self, slots=None, values=None, num=None):\n self.slots = slots or []\n self.values = values or {}\n self.num = num or {}\n\n def __add__(self, another):\n new_slots = sorted(list(set(self.slots + another.slots)))\n new_values = {s: sorted(list(set(self.values.get(s, []) + another.values.get(s, [])))) for s in new_slots}\n return Ontology(new_slots, new_values)\n\n def __radd__(self, another):\n return self if another == 0 else self.__add__(another)\n\n def to_dict(self):\n return {'slots': self.slots, 'values': self.values, 'num': self.num}\n\n def numericalize_(self, vocab):\n self.num = {}\n for s, vs in self.values.items():\n self.num[s] = [vocab.word2index(annotate('{} = {}'.format(s, v)) + ['<eos>'], train=True) for v in vs]\n\n @classmethod\n def from_dict(cls, d):\n return cls(**d)\n" }, { "alpha_fraction": 0.5408118963241577, "alphanum_fraction": 0.5456132888793945, "avg_line_length": 29.546667098999023, "blob_id": "52b3a82b6f02c9cd02ae5621a80b96a3ab71e160", "content_id": "9bf935499ff602cabef654b5a522d5bed5dc3947", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2291, "license_type": "no_license", "max_line_length": 108, "num_lines": 75, "path": "/util/joint_goalacc.py", "repo_name": "coastalcph/dialog-rl", "src_encoding": "UTF-8", "text": "import json\nimport os\nimport argparse\nimport operator\n\npath1 = 'statenet/'\n#this folder should contain the predictions for each experiment i.e. statenet/train-only/prediction_dv.json\n#inside each experiment folder a new file is created, containing the revised accuracies\n#computing per slot accuracy\n\n\nparser = argparse.ArgumentParser(description='Input domain specification')\nparser.add_argument('domain', \n help='The domain you input should correspond to a folder in statenet/')\n \n\nargs = parser.parse_args()\nprint(args.domain)\ncur_path = path1 +'/'+ args.domain\n\n\ndef write_output_file(accuracies, path):\n highest = accuracies[max(accuracies.items(), key=operator.itemgetter(1))[0]]\n best_epoch = max(accuracies.items(), key=operator.itemgetter(1))[0]\n\n output = open(path +\"/\"+'revised_accuracy.txt', 'w')\n\n\n for key, value in accuracies.items():\n output.write('\\n')\n output.write(key + \" : \" + str(value))\n\n\n output.write('\\n')\n output.write('\\n')\n output.write(\"Best epoch: \" + best_epoch + \" : \" + str(highest))\n\n output.close()\n \n return print(\"Done outputting file to: %s\" %path)\n\ndef compute_acc(path):\n accuracies = {}\n for item in os.listdir(path):\n if \"prediction_dv\" in item:\n \n f = open(path+ \"/\" +item, 'r')\n preds = json.load(f)\n count = 0\n correct = 0\n for i in range(len(preds)):\n for turn in preds[i]['turns']:\n ref = list(turn['gold'].items())\n pred = list(turn['pred'].items())\n\n if ref == [] :\n if pred == []:\n count = count + 1\n correct = correct + 1\n else:\n count = count + 1\n else:\n \n intersection = list(set(ref).intersection(pred))\n\n count = count + len(ref)\n correct = correct + len(intersection)\n accuracies[item]= correct/count\n \n write_output_file(accuracies, path)\n return accuracies\n\n#compute\nprint(\"Computing accuracies for \" , args.domain)\ncompute_acc(cur_path)\n" }, { "alpha_fraction": 0.6736263632774353, "alphanum_fraction": 0.70659339427948, "avg_line_length": 36.875, "blob_id": "dc06a31747c441d5cf0d573bfb7f3b3d6525c203", "content_id": "90a32f1f4e0843c0da27c755788301d8dc8808f3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", 
"length_bytes": 910, "license_type": "no_license", "max_line_length": 214, "num_lines": 24, "path": "/scripts/pretrain_domains_all_elmo.sh", "repo_name": "coastalcph/dialog-rl", "src_encoding": "UTF-8", "text": "#!/bin/bash\n# normal cpu stuff: allocate cpus, memory\n#SBATCH --ntasks=1 --cpus-per-task=10 --mem=6000M\n# we run on the gpu partition and we allocate 2 titanx gpus\n#SBATCH -p gpu --gres=gpu:gtx1080:1\n#We expect that our program should not run langer than 4 hours\n#Note that a program will be killed once it exceeds this time!\n#SBATCH --time=20:00:00\n\n#your script, in this case: write the hostname and the ids of the chosen gpus.\n\nhostname\necho $CUDA_VISIBLE_DEVICES\n\nsource /home/grn762/projects/dialog-rl/dialog_env/bin/activate\n\npretrain=(taxi restaurant attraction hotel train)\nname=pretrain-all-elmo\nbsize=32\ngpu=1\n\necho \"==============================\"\necho \" Pretrain:\" ${pretrain[@]};\npython -m run --train_domains ${pretrain[@]} --batch_size $bsize --eval_domains ${pretrain[@]} --gpu $gpu -n $name --delexicalize_labels --epochs 100 --elmo --pooled 2> logs/$name-stdout.log > logs/$name-stderr.log\n\n" }, { "alpha_fraction": 0.4668531119823456, "alphanum_fraction": 0.47466689348220825, "avg_line_length": 36.50308609008789, "blob_id": "3b7a5c637fbeec84a682ec7a1bc2006b9c481c5d", "content_id": "eb0f6544d46356f85b961fee9ac80cad086ee885", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 12158, "license_type": "no_license", "max_line_length": 233, "num_lines": 324, "path": "/Preprocessing/2woz_format.py", "repo_name": "coastalcph/dialog-rl", "src_encoding": "UTF-8", "text": "# coding: utf-8\nimport json\nfrom pprint import pprint\n\n\nwith open('data.json') as f:\n data = json.load(f)\n\nwith open('ontology.json') as f:\n ont = json.load(f)\n \nwith open('dialogue_acts.json') as f:\n acts = json.load(f)\n \nwith open('testListFile.json') as f:\n test = [line.strip() for line in f.readlines()]\n\nwith open('valListFile.json') as f:\n dev = [line.strip() for line in f.readlines()]\n \n\ndef get_past_acts(belief, past_acts):\n\n for i in range(len( belief)):\n cur = belief[i]\n if str(cur['slots']) in past_acts.keys(): \n \n belief[i] = {'slots': cur['slots'], 'act': past_acts[str(cur['slots'])]}\n \n return belief\n\ndef get_domain(entry):\n domains = []\n for key, value in entry['goal'].items():\n if not entry['goal'][key]:\n pass\n elif key != 'message' and key!= 'topic':\n domains.append(key) \n return domains\n\ndef get_label(dictionary, domain):\n cur_turn_label = []\n for key2, value in dictionary.items():\n if value != 'not mentioned' and value != '':\n cur_turn_label.append([str(domain)+'-'+str(key2), value])\n return cur_turn_label\n \ndef get_belief_state(entry, domains, k, acts, past_acts ):\n \n \n turn_label = []\n belief_state = []\n #cur_turn_label = []\n turn_label_update = []\n for domain in domains:\n \n semi = entry['log'][k]['metadata'][domain]['semi']\n book = entry['log'][k]['metadata'][domain]['book']\n \n if k != 1 and k != 0 :\n \n prev_semi = entry['log'][k-2]['metadata'][domain]['semi']\n prev_book = entry['log'][k-2]['metadata'][domain]['book']\n \n cur_turn_label1 = get_label(semi, domain)\n turn_label1 = get_label(prev_semi, domain)\n \n cur_turn_label2 = get_label(book, domain)\n turn_label2 = get_label(prev_book, domain)\n \n cur_turn_label = cur_turn_label1 + cur_turn_label2\n turn_label = turn_label1 + turn_label2\n \n \n for i in range(len(cur_turn_label)):\n if 
cur_turn_label[i] not in turn_label:\n turn_label_update.append(cur_turn_label[i])\n \n else:\n cur_turn_label2 = get_label(book, domain)\n cur_turn_label1 = get_label(semi, domain)\n cur_turn_label = cur_turn_label1 + cur_turn_label2\n \n prev_turn_label = []\n for i in range(len(cur_turn_label)):\n if cur_turn_label[i] not in prev_turn_label:\n turn_label_update.append(cur_turn_label[i])\n\n for slot_pair in cur_turn_label:\n if len(slot_pair[1]) != 0 and type(slot_pair[1][0]) == dict:\n d = slot_pair[1][0]\n \n slots_new = []\n for key, value in d.items():\n \n slots_new.append([str(domain)+'-'+str(key),value])\n #k is the turn number \n for p in slots_new:\n belief_state.append({'slots': p, 'act': \"\"})\n else: \n belief_state.append({'slots': slot_pair, 'act':''})\n \n return belief_state, turn_label_update, past_acts\n\n\ndef check_dict(sample):\n instances = [isinstance(x, dict) for x in sample]\n return True in instances\n\ndef clean_data(data):\n \n for i in range(len(data)):\n domains = data[i]['domain']\n look = [str(domain)+\"-\"+'booked'for domain in domains]\n \n for turn in data[i]['dialogue']:\n new_belief = []\n if len(turn['belief_state']) != 0:\n for m in look:\n for j in range(len(turn['belief_state'])):\n \n if 'booked' not in turn['belief_state'][j]['slots'][0] and turn['belief_state'][j]['slots'][1] != []:\n if {'slots': turn['belief_state'][j]['slots'], \"act\": \"\"} not in new_belief:\n\n new_belief.append({'slots': turn['belief_state'][j]['slots'], \"act\": \"\"})\n \n if [m, []] in turn['turn_label']:\n #print([m, []] )\n turn['turn_label'].remove([m, []])\n if len(turn['turn_label']) > 0:\n \n for label in turn['turn_label']:\n if label[0]== 'restaurant-booked':\n \n pass\n if 'booked' in label[0]:\n \n dom = label[0].replace('booked',\"\")\n cur = label\n turn['turn_label'].remove(label) \n \n #print(label[1], len(label[1]))\n if check_dict(label[1]) == True:\n #print(label[1][0])\n for z in range(len(label[1])):\n for key, value in label[1][z].items():\n turn['turn_label'].append([dom+str(key),value])\n\n \n \n\n turn['belief_state'] = new_belief\n return data\n\ndef act_list(cur_act):\n cur_dialogue = []\n for item in list(cur_act.values()):\n if type(item) != str:\n\n for key in item.keys():\n\n for i in item[key]:\n cur_item = i[0].lower()\n if cur_item != 'none':\n cur_dialogue.append({i[0]:(key,i[1])})\n return cur_dialogue\ndef save_data(split, _set):\n with open(split+'.json', 'w') as fp:\n json.dump(_set, fp, sort_keys=True, indent=4)\n\n\n\n\nidx = [i for i in range(len(data.keys()))]\nall_files = [key for key in data.keys()]\nid2dial = dict(zip(idx, all_files))\ndial2id = dict(zip( all_files, idx))\n\n\n\ndict_list = []\npast_acts = {}\nfor i in idx:\n dial_dict = {}\n dial_dict['dialogue_idx']= i\n dial_dict['goal']= data[id2dial[i]]['goal']\n domains = get_domain(data[id2dial[i]]) \n dial_dict['domain'] = domains\n dial_dict['dialogue'] = []\n current_dict = {}\n turn_idx = 0\n \n for k in range(len(data[id2dial[i]]['log'])):\n if k == 0: #on the first utterance the system transcript is always empty\n current_dict['transcript'] = data[id2dial[i]]['log'][k]['text']\n current_dict['turn_idx'] = turn_idx\n belief_state, turn_labels, past_acts = get_belief_state(data[id2dial[i]],domains, k+1 , acts[id2dial[i].split('.')[0]], past_acts)\n current_dict['belief_state'] = belief_state\n current_dict['turn_label'] = turn_labels\n current_dict['system_transcript'] = \"\"\n current_dict[\"system_acts\"] = []\n \n if 
bool(data[id2dial[i]]['log'][k]['metadata']) == False: #if it is false, this is a user utterance\n            current_dict['transcript'] = data[id2dial[i]]['log'][k]['text']\n            try:\n                \n                belief_state, turn_labels, past_acts = get_belief_state(data[id2dial[i]],domains, k+1, acts[id2dial[i].split('.')[0]], past_acts )\n                \n                current_dict['belief_state'] = belief_state\n                current_dict['turn_label'] = turn_labels\n            except Exception:\n                print(i)\n            current_dict['turn_idx'] = turn_idx\n            dial_dict['dialogue'].append(current_dict)\n            current_dict = {} #initialize dictionary after turn has ended\n            # a turn consists of system utterance and user utterance + other meta data\n            \n        else: \n            current_dict['system_transcript'] = data[id2dial[i]]['log'][k]['text']\n            current_dict[\"system_acts\"] = []\n            try:\n                belief_state, turn_labels, past_acts = get_belief_state(data[id2dial[i]],domains, k , acts[id2dial[i].split('.')[0]], past_acts)\n                current_dict['belief_state'] = belief_state\n                current_dict['turn_label'] = turn_labels\n            except Exception:\n                pass\n            turn_idx += 1 \n            if k == len(data[id2dial[i]]['log']) -1:\n                \n                \n                current_dict['turn_idx'] = turn_idx\n                current_dict['transcript'] = \"\"\n                dial_dict['dialogue'].append(current_dict)\n\n    dict_list.append(dial_dict)\n\n\n# build the cleaned dialogue list consumed by the turn-label and belief-state passes below\ncleaned_list = clean_data(dict_list)\n\n\n###########################################################\n#################      TURN LABELS      ####################\n###########################################################\n\nall_acts = []\nfor index in idx: \n    cur_dialogue = act_list(acts[id2dial[index].split('.')[0]])\n    for turn in acts[id2dial[index].split('.')[0]].keys():\n        if type(acts[id2dial[index].split('.')[0]][turn]) == str:\n            pass\n        else:\n            try:\n                for key in acts[id2dial[index].split('.')[0]][turn].keys():\n                    for alist in acts[id2dial[index].split('.')[0]][turn][key]:\n                        new = [key, alist[0], alist[1]]\n\n                        cleaned_list[index]['dialogue'][int(turn)]['turn_label'].append(new)\n                for j in range(len(cleaned_list[index]['dialogue'][int(turn)]['turn_label'])):\n                    if len(cleaned_list[index]['dialogue'][int(turn)]['turn_label'][j]) == 2:\n                        cleaned_list[index]['dialogue'][int(turn)]['turn_label'][j] = ['Inform', cleaned_list[index]['dialogue'][int(turn)]['turn_label'][j][0], cleaned_list[index]['dialogue'][int(turn)]['turn_label'][j][1]]\n            except Exception:\n                pass\n\n    \n    all_acts.append(cur_dialogue)\n    \n\n###########################################################\n#################      BELIEF STATE      ####################\n###########################################################\n\nfor i in idx:\n    for cur in all_acts[i]:\n        for key in cur.keys():\n            \n            for turn_idx in range(len(cleaned_list[i]['dialogue'])):\n                lower = key.lower()\n                for j in range(len(cleaned_list[i]['dialogue'][turn_idx]['belief_state'])):\n                    slot = cleaned_list[i]['dialogue'][turn_idx]['belief_state'][j]['slots'][0]\n                    val = cleaned_list[i]['dialogue'][turn_idx]['belief_state'][j]['slots'][1]\n                    c = cleaned_list[i]['dialogue'][turn_idx]['belief_state'][j]['act']\n                    if lower in slot or cur[key][1] == val.lower():\n                        if c == \"\":\n                            cleaned_list[i]['dialogue'][turn_idx]['belief_state'][j]['act'] = cur[key][0]\n    for turn_idx in range(len(cleaned_list[i]['dialogue'])):\n        for j in range(len(cleaned_list[i]['dialogue'][turn_idx]['belief_state'])):\n            c = cleaned_list[i]['dialogue'][turn_idx]['belief_state'][j]['act']\n            if c== \"\":\n                dom = slot.split('-')[0][0].upper()+slot.split('-')[0][1::]\n\n                cleaned_list[i]['dialogue'][turn_idx]['belief_state'][j]['act'] = dom+'-Inform'\n\n\n\nfor i in idx:\n    for key, value in acts[id2dial[i].split('.')[0]].items():\n        
cur_dialogue = cleaned_list[i]['dialogue']\n \n #turn['system_acts'].append()\n if type(value) != str:\n \n try:\n turn = cur_dialogue[int(key)]\n for k, v in value.items():\n for item in v:\n if item != ['none', 'none']:\n cleaned_list[i]['dialogue'][int(key)]['system_acts'].append(item)\n \n except Exception:\n pass\n \n\ntest_ids = [dial2id[f] for f in test]\ndev_ids = [dial2id[f] for f in dev]\ntrain_ids = list(set(set(idx) - set(test_ids))-set(dev_ids))\n\n\ntest_set = [cleaned_list[i] for i in test_ids]\ndev_set = [cleaned_list[i] for i in dev_ids]\ntrain_set = [cleaned_list[i] for i in train_ids]\n\n\n\n\nsave_data('train', train_set)\nsave_data('test', test_set)\nsave_data('dev', dev_set)\n\n \n\n" } ]
16
Walt280/PeoplezBot
https://github.com/Walt280/PeoplezBot
0917f71195c3875a10d31c7fbf103b7af3d2d5e2
2937fefe7388088ac5ff44080734a7c5d785ac1e
2cab90fef547132d8fc8862e3ecfcc62849a722d
refs/heads/master
2020-04-02T20:42:56.575126
2018-10-26T20:10:32
2018-10-26T20:10:32
154,776,898
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.8405796885490417, "alphanum_fraction": 0.8405796885490417, "avg_line_length": 33.5, "blob_id": "60fdee5f0b15fda314cef5862156ecbccbede799", "content_id": "dea48cfb03c9324b43cc896f19cb37e898901582", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 69, "license_type": "no_license", "max_line_length": 55, "num_lines": 2, "path": "/README.md", "repo_name": "Walt280/PeoplezBot", "src_encoding": "UTF-8", "text": "# PeoplezBot\nCustom discord bot tailored for personal Discord server\n" }, { "alpha_fraction": 0.603823184967041, "alphanum_fraction": 0.6088411211967468, "avg_line_length": 22.761363983154297, "blob_id": "8d87bffa5a6423c3881a6c6f301f322ba14830cd", "content_id": "fccc84b4cbc062695c85243f26cd95bce238da6f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4191, "license_type": "no_license", "max_line_length": 99, "num_lines": 176, "path": "/calculator_core.py", "repo_name": "Walt280/PeoplezBot", "src_encoding": "UTF-8", "text": "import math\n\n# Returns whether cur has higher precidence than cmp.\ndef _is_higher(cur, cmp):\n\tops = {\"n\":4, \"p\":4, \"^\":3, \"sqrt\":3, \"/\":2, \"*\":2, \"+\":1, \"-\":1, \"(\" : 0}\n\t\n\treturn ops[cur] > ops[cmp]\n\n# Returns whether the +/- sign is a unary or binary operator.\n# See http://wcipeg.com/wiki/Shunting_yard_algorithm\ndef _is_unary(prev):\n\treturn prev in \"(+-*/^\" or prev in [\"sqrt\"]\n\n# Converts the expression exp into tokens.\ndef _tokenize(exp):\n\ttokens = []\n\ttmp = \"\"\n\t\n\t# If current character is an operator, push to output list.\n\t# Else, move i to tmp to accumulate.\n\t# (tmp accumulator is for things like numbers and function names)\n\tfor i in exp:\n\t\tif i in \"+-/*()^\":\n\t\t\tif tmp:\n\t\t\t\ttokens.append(tmp)\n\t\t\t\ttmp = \"\"\n\t\t\ttokens.append(i)\n\t\telse:\n\t\t\ttmp += i\n\t\t\t\n\t\t\t#Special case of lone functions. 
(ex: sqrt 4)\n\t\t\tif tmp in [\"sqrt\"]:\n\t\t\t\ttokens.append(tmp)\n\t\t\t\ttmp = \"\"\n\t\t\t\t\n\tif tmp:\n\t\ttokens.append(tmp)\n\treturn tokens\n\n# Prepare the equation to be parsed: removes spaces and replaces unicode operators with ascii ones.\ndef _prep_eq(eq):\n\treturn eq.replace(\" \",\"\").replace(\"−\",\"-\").replace(\"÷\",\"/\").replace(\"×\",\"*\").replace(\"√\",\"sqrt\")\n\n# Check if exp is a integer or decimal number.\ndef _is_number(exp):\n\ttry:\n\t\tif(\".\" in exp):\n\t\t\tfloat(exp)\n\t\telse:\n\t\t\tint(exp)\n\t\treturn True\n\texcept ValueError:\n\t\treturn False\n\n# Converts exp to either a decimal or integer number.\ndef _to_number(exp):\n\tif(\".\" in exp):\n\t\treturn float(exp)\n\telse:\n\t\treturn int(exp)\n\n# Exception messages.\ndef invalid_token():\n\treturn \"Invalid token\"\n\ndef unbalanced_parenthesis():\n\treturn \"Unbalanced parenthesis\"\n\t\n# Shunting yard algorithm\n# Converts a string infix equation to a reverse polish notation (RPN) list of tokens.\n# See https://en.wikipedia.org/wiki/Shunting-yard_algorithm\ndef parse_equation(equation):\n\tequation = _prep_eq(equation)\n\tstack = []\n\toutput = []\n\teq_tokens = _tokenize(equation)\n\teq_tokens.reverse()\n\t\n\tif(eq_tokens[-1] == \"-\"):\n\t\tstack.append(\"n\")\n\t\teq_tokens.pop()\n\t\n\tlast = \"\"\n\twhile eq_tokens:\n\t\tcur = eq_tokens.pop()\n\t\t\t\n\t\tif(cur == \"+\"):\n\t\t\tif(_is_unary(last)):\n\t\t\t\tstack.append(\"p\")\n\t\t\telse:\n\t\t\t\tif(stack and not _is_higher(cur, stack[-1])):\n\t\t\t\t\toutput.append(stack.pop())\n\t\t\t\tstack.append(cur)\n\t\t\t\n\t\telif(cur == \"-\"):\n\t\t\tif(_is_unary(last)):\n\t\t\t\tstack.append(\"n\")\n\t\t\telse:\n\t\t\t\tif(stack and not _is_higher(cur, stack[-1])):\n\t\t\t\t\toutput.append(stack.pop())\n\t\t\t\tstack.append(cur)\n\t\t# Exponentiation is special\n\t\telif(cur == \"^\"):\n\t\t\tif(stack and cur != stack[-1] and not _is_higher(cur, stack[-1])):\n\t\t\t\toutput.append(stack.pop())\n\t\t\tstack.append(cur)\n\t\telif(cur == \"/\" or cur == \"*\" or cur == \"sqrt\"):\n\t\t\tif(stack and not _is_higher(cur, stack[-1])):\n\t\t\t\toutput.append(stack.pop())\n\t\t\tstack.append(cur)\n\t\telif(cur == \"(\"):\n\t\t\tstack.append(cur)\n\t\telif(cur == \")\"):\n\t\t\twhile(stack and stack[-1] != \"(\"):\n\t\t\t\toutput.append(stack.pop())\n\t\t\t#Discard left parenthesis.\n\t\t\tstack.pop()\n\t\telif(_is_number(cur)):\n\t\t\toutput.append(cur)\n\t\telse:\n\t\t\traise ValueError(invalid_token())\n\t\tlast = cur\n\t\n\tif(\"(\" in stack or \")\" in stack):\n\t\traise ValueError(unbalanced_parenthesis())\n\t\t\n\twhile stack:\n\t\toutput.append(stack.pop())\n\t\n\treturn output\n\n# Evalutes the RPN list generated by parse_equation.\n# See https://en.wikipedia.org/wiki/Reverse_Polish_notation#Postfix_evaluation_algorithm\ndef eval_equation(eq, round_digits = -1):\n\tstack = []\n\t\n\tfor i in eq:\n\t\tif _is_number(i):\n\t\t\tstack.append(_to_number(i))\n\t\telse:\n\t\t\tresult = 0\n\t\t\t\n\t\t\tif(i == \"n\"):\n\t\t\t\tval = stack.pop()\n\t\t\t\tresult = val * -1\n\t\t\telif(i == \"sqrt\"):\n\t\t\t\tval = stack.pop()\n\t\t\t\tresult = math.sqrt(val)\n\t\t\telif(i == \"+\"):\n\t\t\t\tright = stack.pop()\n\t\t\t\tleft = stack.pop()\n\t\t\t\tresult = left + right\n\t\t\telif(i == \"-\"):\n\t\t\t\tright = stack.pop()\n\t\t\t\tleft = stack.pop()\n\t\t\t\tresult = left - right\n\t\t\telif(i == \"/\"):\n\t\t\t\tright = stack.pop()\n\t\t\t\tleft = stack.pop()\n\t\t\t\tresult = left / right\n\t\t\telif(i == \"*\"):\n\t\t\t\tright = 
stack.pop()\n\t\t\t\tleft = stack.pop()\n\t\t\t\tresult = left * right\n\t\t\telif(i == \"^\"):\n\t\t\t\tright = stack.pop()\n\t\t\t\tleft = stack.pop()\n\t\t\t\tresult = left ** right\n\t\t\telse:\n\t\t\t\traise ValueError(invalid_token())\n\t\t\t\n\t\t\tstack.append(result)\n\t\t\t\n\tif(round_digits > -1):\n\t\treturn round(stack.pop(), round_digits)\n\treturn stack.pop()\n\t\t\t" } ]
2
ravikiradoo/Speech_with_python
https://github.com/ravikiradoo/Speech_with_python
1c889275bb4f498e308fc3edc2e07376e08d6c83
fadd2cde5ca5efa3f2da1ea18ef394572deb7a59
66ea106a2f3f673bb2180cc54a642c70f5eb8ba5
refs/heads/master
2022-12-06T02:42:30.429364
2018-02-24T19:38:06
2018-02-24T19:38:06
122,024,551
0
1
null
2018-02-19T05:58:30
2018-02-19T05:59:32
2018-02-24T19:38:16
Python
[ { "alpha_fraction": 0.6840039491653442, "alphanum_fraction": 0.689892053604126, "avg_line_length": 33.965518951416016, "blob_id": "8f91518462a48fa49a33df50c1bb99bff2d0587a", "content_id": "c7fb119ef9d5d854dca6b83ccaca25010e35564c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1019, "license_type": "no_license", "max_line_length": 103, "num_lines": 29, "path": "/get_answer.py", "repo_name": "ravikiradoo/Speech_with_python", "src_encoding": "UTF-8", "text": "import time\nfrom selenium import webdriver\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.common.exceptions import TimeoutException\nfrom bs4 import BeautifulSoup\nfrom urllib.parse import urlparse\nimport sys\n\nclass Fetcher:\n def __init__(self,url):\n self.driver=webdriver.Chrome(r'C:\\Users\\Student\\Downloads\\chromedriver_win32\\chromedriver.exe')\n self.driver.wait=WebDriverWait(self.driver,5)\n self.url=url\n self.lookup()\n\n def lookup(self):\n self.driver.get(self.url)\n try:\n ip=self.driver.wait.until(EC.presence_of_all_elements_located((By.CLASS_NAME,\"gsfi\")))\n except:\n print(\"Failed\")\n\n soup=BeautifulSoup(self.driver.page_source,\"html.parser\")\n answer=soup.findAll(class_=\"_sPg\")\n if not answer:\n answer=soup.findAll(class_=\"_m3b\")\n print(answer[0].get_text())\n\n\n\n\n\n" }, { "alpha_fraction": 0.6483516693115234, "alphanum_fraction": 0.6538461446762085, "avg_line_length": 20.254901885986328, "blob_id": "f7680a32b1a7f3f800e71c336fa4894a637703bb", "content_id": "4cde4479e893829d336e32771efd692f53ac74f2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1092, "license_type": "no_license", "max_line_length": 132, "num_lines": 51, "path": "/main.py", "repo_name": "ravikiradoo/Speech_with_python", "src_encoding": "UTF-8", "text": "import pyaudio\nimport wave\nimport speech_recognition as sr\nimport subprocess\nimport os\nfrom commands import Commander\n\ndef play_audio(filename):\n chunk=1024\n wv=wave.open(filename,\"rb\")\n pa=pyaudio.PyAudio()\n\n stream=pa.open(format=pa.get_format_from_width(wv.getsampwidth()),channels=wv.getnchannels(),rate=wv.getframerate(),output=True)\n\n data_stream=wv.readframes(chunk)\n\n while data_stream:\n stream.write(data_stream)\n data_stream=wv.readframes(chunk)\n\n stream.close()\n pa.terminate()\n\nr=sr.Recognizer()\ncmd=Commander()\nrunning=True\n\ndef initSpeech():\n play_audio(\"Audio/sms-alert-5-daniel_simon.wav\")\n\n with sr.Microphone() as source:\n print(\"Listening.......\")\n audio=r.listen(source)\n\n play_audio(\"Audio/sms-alert-5-daniel_simon.wav\")\n\n command=\"\"\n try:\n command=r.recognize_google(audio)\n except:\n print(\"Could not understand\")\n\n if command in [\"quit\",\"good bye\" , \"ok bye\", \"exit\"]:\n global running\n running=False\n else:\n cmd.Discover(command)\n\n\n\ninitSpeech()\n\n\n\n\n\n\n\n\n" }, { "alpha_fraction": 0.5947556495666504, "alphanum_fraction": 0.5947556495666504, "avg_line_length": 32.52000045776367, "blob_id": "7badc7ef8bc52ca668af50d94c23945fd20e8cf5", "content_id": "cd0552ebd5a28ec1b7b929d9af4dbdab1e34b0f6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 839, "license_type": "no_license", "max_line_length": 94, "num_lines": 25, "path": "/commands.py", "repo_name": "ravikiradoo/Speech_with_python", 
"src_encoding": "UTF-8", "text": "import subprocess\nimport os\nfrom get_answer import Fetcher\n\nclass Commander:\n def __init__(self):\n self.confirm=[\"yes\",\"confirm\",\"sure\",\"affirmative\",\"do it\",\"yeah\"]\n self.cancel=[\"no\" \"negative\",\"don't\",\"wait\",\"cancel\"]\n\n def Discover(self,text):\n if \"what\" in text and \"your name\" in text:\n self.respond(\"Hi my name is Ravi. How Are You ?\")\n if \"How are you \" in text:\n self.respond(\"I am fine and you ?\")\n\n else:\n f=Fetcher(\"https://www.google.co.in/search?q=\"+text)\n\n\n\n def respond(self,text):\n os.chdir(\"C:\\Program Files\\Jampal\")\n file = open(r\"C:\\Users\\Student\\PycharmProjects\\Speech_with_python\\Text.txt\", \"w+\")\n file.write(text)\n os.popen(\"ptts -u \" + r\"C:\\Users\\Student\\PycharmProjects\\Speech_with_python\\Text.txt\")\n\n" } ]
3
mucahitkosgen/main
https://github.com/mucahitkosgen/main
6632182858a9cf73f8b99737869b545ceaf72d97
7c9db64e2d1f8f06cd4952223d55ac17135d6cda
d84203fa3cc1fadeb38571e181a781c77a39a4b0
refs/heads/main
2023-06-18T03:02:32.786946
2021-07-15T17:20:00
2021-07-15T17:20:00
386,373,567
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5961360335350037, "alphanum_fraction": 0.603245735168457, "avg_line_length": 39.4375, "blob_id": "7fd818f7f7093f9457125141f572bf627ad70380", "content_id": "d79d2333b0e17cc0e6581f338a6559d07752ce7d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6500, "license_type": "no_license", "max_line_length": 116, "num_lines": 160, "path": "/apps/parameter/models.py", "repo_name": "mucahitkosgen/main", "src_encoding": "UTF-8", "text": "import datetime\nimport json\n\nfrom ckeditor_uploader.fields import RichTextUploadingField\nfrom django.core.checks import messages\nfrom django.core.exceptions import ValidationError\nfrom django.template.defaultfilters import slugify\nfrom django.utils.translation import gettext_lazy as _\nfrom django.db import models\nfrom ..common.oneTextField.oneField import OneTextField\nfrom ..common.mixins.audit import AuditMixin\nfrom ..common.fileUpload.userPath import userDirectoryPath\nfrom ..common.fileUpload.validate import validateFileExtension\nfrom django.contrib.auth.models import Group, User\nfrom ckeditor.fields import RichTextField\nfrom django.db.models.signals import post_save\nfrom django.dispatch import receiver\nfrom django.db.models import Q\nfrom translations.models import Translatable\nfrom django.conf import settings\nfrom django.db.models.signals import post_save\nfrom django.dispatch import receiver\nfrom django.utils import timezone\nfrom django.utils.timezone import utc\nfrom apps.content.models import Announcement\n# Time Counter\nclass TimeCounter(OneTextField):\n summary = models.CharField(max_length=200, blank=True, verbose_name=_('özet'))\n time_counter = models.DateTimeField()\n bg_image = models.ImageField(upload_to=userDirectoryPath, null=True, blank=True,\n verbose_name=_('Arkaplan Görsel'))\n icon = models.ImageField(upload_to=userDirectoryPath, null=True, blank=True,\n verbose_name=_('İcon Görsel'))\n\n @property\n def bg_image_url(self):\n if self.bg_image and hasattr(self.bg_image, 'url'):\n return self.bg_image.url\n\n @property\n def icon_url(self):\n if self.icon and hasattr(self.icon, 'url'):\n return self.icon.url\n\n @property\n def time_counter_diff(self):\n # TODO : js ile yapılacak ,bitmedi\n now_2 = timezone.now()\n now = datetime.datetime.utcnow().replace(tzinfo=utc)\n now_3 = datetime.datetime.now()\n counter = 0\n print(\"self.time_counter.year\", self.time_counter.year)\n print(\"self.time_counter.day\", self.time_counter.day)\n print(\"self.time_counter.hour\", self.time_counter.hour)\n print(\"self.time_counter.min\", self.time_counter.min)\n print(\"self.time_counter.second\", self.time_counter.second)\n\n print(\"self.time_counter :\", self.time_counter)\n print(\"now :\", now)\n if self.time_counter > now_2:\n diff = self.time_counter - now_2\n print(\"diff :\", diff)\n print(\"diff :\", diff.days)\n print(\"diff :\", diff.min)\n print(\"diff :\", diff.seconds)\n days, seconds = diff.days, diff.seconds\n hours = days * 24 + seconds // 3600\n hours_2 = diff.total_seconds() / 3600\n hours_3 = hours % 24\n minutes = (seconds % 3600) // 60\n seconds = seconds % 60\n print(\"counter diff :\", days, hours, hours_2, hours_3, minutes, seconds)\n counter = {\"days\": days, \"hours\": hours_3, \"minutes\": minutes, \"seconds\": seconds}\n\n\n else:\n counter = {\"days\": 0, \"hours\": 0, \"minutes\": 0, \"seconds\": 0}\n\n return counter\n\n class Meta:\n verbose_name = _('Zaman Sayaç')\n verbose_name_plural = _('Zaman Sayaç')\n 
default_permissions = ()\n permissions = ((_('liste'), _('Listeleme Yetkisi')),\n (_('sil'), _('Silme Yetkisi')),\n (_('ekle'), _('Ekleme Yetkisi')),\n (_('guncelle'), _('Güncelleme Yetkisi')))\n\n\n# Value Counter\nclass ValueCounter(OneTextField):\n summary = models.CharField(max_length=200, blank=True, verbose_name=_('Özet'))\n counter_box = models.ManyToManyField(\"ValueCounterBox\", verbose_name=_('Sayaç Değerleri'))\n bg_image = models.ImageField(upload_to=userDirectoryPath, null=True, blank=True,\n verbose_name=_('Arkaplan Görsel'))\n icon = models.ImageField(upload_to=userDirectoryPath, null=True, blank=True,\n verbose_name=_('İcon Görsel'))\n\n @property\n def bg_image_url(self):\n if self.bg_image and hasattr(self.bg_image, 'url'):\n return self.bg_image.url\n\n @property\n def icon_url(self):\n if self.icon and hasattr(self.icon, 'url'):\n return self.icon.url\n\n class Meta:\n verbose_name = _('Değer Sayaç Section')\n verbose_name_plural = _('Değer Sayaç Section')\n default_permissions = ()\n permissions = ((_('liste'), _('Listeleme Yetkisi')),\n (_('sil'), _('Silme Yetkisi')),\n (_('ekle'), _('Ekleme Yetkisi')),\n (_('guncelle'), _('Güncelleme Yetkisi')))\n\n\n# Value Counter Box\nclass ValueCounterBox(OneTextField):\n counter = models.CharField(max_length=200, blank=True, verbose_name=_('Sayaç Değeri')) # integar veya charfield\n icon = models.ImageField(upload_to=userDirectoryPath, null=True, blank=True,\n verbose_name=_('İcon Görsel')) # image veya charfield (icon class tag )\n\n @property\n def icon_url(self):\n if self.icon and hasattr(self.icon, 'url'):\n return self.icon.url\n\n class Meta:\n verbose_name = _('Değer Sayaç')\n verbose_name_plural = _('Değer Sayaç')\n default_permissions = ()\n permissions = ((_('liste'), _('Listeleme Yetkisi')),\n (_('sil'), _('Silme Yetkisi')),\n (_('ekle'), _('Ekleme Yetkisi')),\n (_('guncelle'), _('Güncelleme Yetkisi')))\n\n\nclass AnnouncementSection(OneTextField):\n summary = models.CharField(max_length=200, blank=True, verbose_name=_('Özet'))\n Announcements = models.ManyToManyField(Announcement, verbose_name=_('Duyurular'))\n icon = models.ImageField(upload_to=userDirectoryPath, null=True, blank=True,\n verbose_name=_('İcon Görsel'))\n\n @property\n def icon_url(self):\n if self.icon and hasattr(self.icon, 'url'):\n return self.icon.url\n\n class Meta:\n # ordering = ('date',)\n verbose_name = _('Duyuru Bölümü')\n verbose_name_plural = _('Duyuru Bölümü')\n default_permissions = ()\n permissions = ((_('liste'), _('Listeleme Yetkisi')),\n (_('sil'), _('Silme Yetkisi')),\n (_('ekle'), _('Ekleme Yetkisi')),\n (_('guncelle'), _('Güncelleme Yetkisi')))\n" }, { "alpha_fraction": 0.5740185976028442, "alphanum_fraction": 0.5841084718704224, "avg_line_length": 59.409523010253906, "blob_id": "454615b26273d1f8e98d6d439ae78d717c3062b8", "content_id": "b3543682c94667c6614f969bd2ff332d3a0794d5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6375, "license_type": "no_license", "max_line_length": 178, "num_lines": 105, "path": "/apps/main/migrations/0001_initial.py", "repo_name": "mucahitkosgen/main", "src_encoding": "UTF-8", "text": "# Generated by Django 3.0.8 on 2021-07-15 17:08\n\nimport apps.common.fileUpload.userPath\nimport ckeditor.fields\nimport django.core.validators\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n 
name='Menu',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('created_at', models.DateTimeField(auto_now_add=True)),\n ('updated_at', models.DateTimeField(auto_now=True)),\n ('created_by', models.CharField(blank=True, editable=False, max_length=100, null=True)),\n ('updated_by', models.CharField(blank=True, editable=False, max_length=255, null=True)),\n ('text', models.CharField(max_length=200, null=True, verbose_name='Başlık')),\n ('view_name', models.CharField(blank=True, choices=[], max_length=200, null=True, unique=True, verbose_name='Görünüm Adı')),\n ('alignment', models.IntegerField(blank=True, null=True, unique=True, verbose_name='Sıralama')),\n ('redirect_link', models.URLField(blank=True, null=True, verbose_name='Yönlendirme Linki')),\n ],\n options={\n 'verbose_name': 'Menü',\n 'verbose_name_plural': 'Menü',\n 'permissions': (('liste', 'Listeleme Yetkisi'), ('sil', 'Silme Yetkisi'), ('ekle', 'Ekleme Yetkisi'), ('guncelle', 'Güncelleme Yetkisi')),\n 'default_permissions': (),\n },\n ),\n migrations.CreateModel(\n name='MenuLocation',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('created_at', models.DateTimeField(auto_now_add=True)),\n ('updated_at', models.DateTimeField(auto_now=True)),\n ('created_by', models.CharField(blank=True, editable=False, max_length=100, null=True)),\n ('updated_by', models.CharField(blank=True, editable=False, max_length=255, null=True)),\n ('text', models.CharField(max_length=200, null=True, verbose_name='Başlık')),\n ],\n options={\n 'verbose_name': 'Menü Konumu',\n 'verbose_name_plural': 'Menü Konumu',\n 'permissions': (('liste', 'Listeleme Yetkisi'), ('sil', 'Silme Yetkisi'), ('ekle', 'Ekleme Yetkisi'), ('guncelle', 'Güncelleme Yetkisi')),\n 'default_permissions': (),\n },\n ),\n migrations.CreateModel(\n name='SiteInfo',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('created_at', models.DateTimeField(auto_now_add=True)),\n ('updated_at', models.DateTimeField(auto_now=True)),\n ('created_by', models.CharField(blank=True, editable=False, max_length=100, null=True)),\n ('updated_by', models.CharField(blank=True, editable=False, max_length=255, null=True)),\n ('text', models.CharField(max_length=200, null=True, verbose_name='Başlık')),\n ('keywords', models.TextField(null=True, verbose_name='Etiketler')),\n ('author', models.CharField(blank=True, max_length=400, null=True, verbose_name='Sahip')),\n ('favicon', models.ImageField(blank=True, null=True, upload_to=apps.common.fileUpload.userPath.userDirectoryPath, verbose_name='Favicon')),\n ('header_logo', models.ImageField(blank=True, null=True, upload_to=apps.common.fileUpload.userPath.userDirectoryPath, verbose_name='Üst Logo')),\n ('footer_logo', models.ImageField(blank=True, null=True, upload_to=apps.common.fileUpload.userPath.userDirectoryPath, verbose_name='Alt Logo')),\n ('address', ckeditor.fields.RichTextField(null=True, verbose_name='Adres')),\n ],\n options={\n 'verbose_name': 'Site Bilgileri',\n 'verbose_name_plural': 'Site Bilgileri',\n 'ordering': ('text',),\n 'permissions': (('liste', 'Listeleme Yetkisi'), ('sil', 'Silme Yetkisi'), ('ekle', 'Ekleme Yetkisi'), ('guncelle', 'Güncelleme Yetkisi')),\n 'default_permissions': (),\n },\n ),\n migrations.CreateModel(\n name='SubMenu',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, 
verbose_name='ID')),\n ('created_at', models.DateTimeField(auto_now_add=True)),\n ('updated_at', models.DateTimeField(auto_now=True)),\n ('created_by', models.CharField(blank=True, editable=False, max_length=100, null=True)),\n ('updated_by', models.CharField(blank=True, editable=False, max_length=255, null=True)),\n ('text', models.CharField(max_length=200, null=True, verbose_name='Başlık')),\n ('view_name', models.CharField(blank=True, choices=[], max_length=200, null=True, verbose_name='Görünüm Adı')),\n ('alignment', models.IntegerField(blank=True, null=True, validators=[django.core.validators.MinValueValidator(0)], verbose_name='Sıralama')),\n ('slug', models.SlugField(blank=True, max_length=200, null=True, unique=True, verbose_name='Slug')),\n ('topMenu', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='sub_menus', to='main.Menu', verbose_name='Menü')),\n ],\n options={\n 'verbose_name': 'Alt Menü',\n 'verbose_name_plural': 'Alt Menü',\n 'permissions': (('liste', 'Listeleme Yetkisi'), ('sil', 'Silme Yetkisi'), ('ekle', 'Ekleme Yetkisi'), ('guncelle', 'Güncelleme Yetkisi')),\n 'default_permissions': (),\n },\n ),\n migrations.AddField(\n model_name='menu',\n name='menu_location',\n field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, related_name='menus', to='main.MenuLocation', verbose_name='Menü Konumu'),\n ),\n ]\n" }, { "alpha_fraction": 0.5972262024879456, "alphanum_fraction": 0.600213348865509, "avg_line_length": 38.38375473022461, "blob_id": "a82ff1927064f24e222a3d26d059ccf2b1f36cd1", "content_id": "f1b63607ed8c69df598f72bac5a33fee3a7f97ad", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 14133, "license_type": "no_license", "max_line_length": 110, "num_lines": 357, "path": "/apps/content/models.py", "repo_name": "mucahitkosgen/main", "src_encoding": "UTF-8", "text": "from datetime import datetime\n\nfrom colorfield.fields import ColorField\nfrom django.contrib.sites.models import Site\nfrom django.contrib.sites.shortcuts import get_current_site\nfrom django.db import models\nfrom django.http import Http404\nfrom django.urls import reverse\nfrom django.utils.translation import gettext_lazy as _\n\nfrom apps.common.fileUpload.userPath import userDirectoryPath, image_upload_to\nfrom apps.common.oneTextField import OneTextField\nfrom ckeditor.fields import RichTextField\n\n\nclass AnnouncementCategory(OneTextField):\n color = ColorField(max_length=200, verbose_name=_('Renk Kodu'))\n\n class Meta:\n verbose_name = _('Duyuru Kategorisi')\n verbose_name_plural = _('Duyuru Kategorileri')\n\n\nclass Announcement(OneTextField):\n category = models.ForeignKey(AnnouncementCategory, verbose_name=_('Duyuru Kategorisi', ),\n related_name='announcements', on_delete=models.PROTECT)\n keywords = models.TextField(null=True, blank=True, verbose_name=_('Etiketler'))\n summary = models.CharField(max_length=400, blank=True, verbose_name=_('Özet'))\n # alignment = models.IntegerField(null=True, blank=True, unique=True, verbose_name=_('Sıralama'))\n bg_image = models.ImageField(upload_to=userDirectoryPath, null=True, blank=True,\n verbose_name=_('Arkaplan Görsel'))\n icon = models.ImageField(upload_to=userDirectoryPath, null=True, blank=True,\n verbose_name=_('İcon Görsel'))\n content = RichTextField(blank=True, verbose_name=_('İçerik'))\n is_published = models.BooleanField(default=True)\n date = models.DateTimeField()\n slug = models.SlugField(blank=False, unique=True)\n\n def 
get_absolute_url(self):\n return reverse('content:announcement_detail', kwargs={'slug': self.slug})\n\n @property\n def bg_image_url(self):\n if self.bg_image and hasattr(self.bg_image, 'url'):\n return self.bg_image.url\n\n @property\n def icon_url(self):\n if self.icon and hasattr(self.icon, 'url'):\n return self.icon.url\n\n class Meta:\n ordering = ('date',)\n verbose_name = _('Duyuru')\n verbose_name_plural = _('Duyuru')\n default_permissions = ()\n permissions = ((_('liste'), _('Listeleme Yetkisi')),\n (_('sil'), _('Silme Yetkisi')),\n (_('ekle'), _('Ekleme Yetkisi')),\n (_('guncelle'), _('Güncelleme Yetkisi')))\n\n\n# Slider todo atolye eklendiğinde slider ekle\nclass Slider(OneTextField):\n bg_image = models.ImageField(upload_to='slider/images', null=True, blank=True,\n verbose_name=_('Görsel'))\n slider_summary = models.CharField(max_length=400, null=True, blank=True, verbose_name=_('Slider Özet'))\n slider_content = RichTextField(null=True, blank=True, verbose_name=_('Slider Özet'))\n redirect_link = models.URLField(null=True, blank=True, verbose_name=_('Yönlendirme Linki'))\n\n is_published = models.BooleanField(default=True, verbose_name=_('Yayınlansın mı?'))\n alignment = models.IntegerField(null=True, blank=True, unique=True, verbose_name=_('Sıralama'))\n slug = models.SlugField(unique=True)\n date = models.DateTimeField()\n\n def get_absolute_url(self):\n return reverse('content:slider_detail', kwargs={'slug': self.slug})\n\n @property\n def bg_image_url(self):\n if self.bg_image and hasattr(self.bg_image, 'url'):\n return self.bg_image.url\n\n class Meta:\n verbose_name = _('Slider')\n verbose_name_plural = _('Slider')\n default_permissions = ()\n permissions = ((_('liste'), _('Listeleme Yetkisi')),\n (_('sil'), _('Silme Yetkisi')),\n (_('ekle'), _('Ekleme Yetkisi')),\n (_('guncelle'), _('Güncelleme Yetkisi')))\n\n\n# context processors\nclass WebSites(OneTextField):\n logo = models.ImageField(upload_to=userDirectoryPath, null=True, blank=True,\n verbose_name=_('Logo'))\n site_url = models.URLField(max_length=300, verbose_name=_('Site Url'))\n detail_info = RichTextField(null=True, blank=True)\n is_published = models.BooleanField(default=True, verbose_name='Yayına alınsın mı?')\n\n class Meta:\n verbose_name = _('Web Site')\n verbose_name_plural = _('Web Siteler')\n\n @property\n def logo_url(self):\n if self.logo and hasattr(self.logo, 'url'):\n return self.logo.url\n\n\nclass SSS(OneTextField):\n text = models.TextField(null=False, verbose_name=_('Soru'))\n answer = models.TextField(null=False, verbose_name=_('Cevap'))\n site = models.ManyToManyField(Site)\n\n def __str__(self):\n return self.text\n\n\n\n class Meta:\n verbose_name = _('Sıkça Sorular Soru')\n verbose_name_plural = _('Sıkça Sorular Sorular')\n\n\nclass NewsCastCategory(OneTextField):\n color = ColorField(max_length=200, default='#ffffff', verbose_name=_('Renk Kodu'))\n\n class Meta:\n verbose_name = _('Haber Kategorisi')\n verbose_name_plural = _('Haber Kategorileri')\n\n\nclass NewsCast(OneTextField):\n category = models.ForeignKey(NewsCastCategory, verbose_name=_('Haber Kategorisi', ),\n related_name='news_casts', on_delete=models.PROTECT)\n keywords = models.TextField(null=True, blank=True, verbose_name=_('Etiketler'))\n summary = models.CharField(max_length=400, blank=True, verbose_name=_('Özet'))\n # alignment = models.IntegerField(null=True, blank=True, unique=True, verbose_name=_('Sıralama'))\n bg_image = models.ImageField(upload_to=userDirectoryPath, null=True, blank=True,\n 
verbose_name=_('Arkaplan Görsel'))\n icon = models.ImageField(upload_to=userDirectoryPath, null=True, blank=True,\n verbose_name=_('İcon Görsel'))\n content = RichTextField(blank=True, verbose_name=_('İçerik'))\n is_published = models.BooleanField(default=True)\n date = models.DateTimeField()\n slug = models.SlugField(blank=False, unique=True)\n\n @property\n def bg_image_url(self):\n if self.bg_image and hasattr(self.bg_image, 'url'):\n return self.bg_image.url\n\n @property\n def icon_url(self):\n if self.icon and hasattr(self.icon, 'url'):\n return self.icon.url\n\n def get_absolute_url(self):\n return reverse('content:news_cast_detail', kwargs={'slug': self.slug}) # new\n\n class Meta:\n verbose_name = _('Haber')\n verbose_name_plural = _('Haberler')\n default_permissions = ()\n permissions = ((_('liste'), _('Listeleme Yetkisi')),\n (_('sil'), _('Silme Yetkisi')),\n (_('ekle'), _('Ekleme Yetkisi')),\n (_('guncelle'), _('Güncelleme Yetkisi')))\n\n\nclass MemberLevel(OneTextField):\n LEVEL = (\n ('BD', _('Board of Directors')),\n ('BT', _('Board of Trustees')),\n )\n level = models.CharField(max_length=200, blank=False, choices=LEVEL, verbose_name=_('Ekipteki Konumu'))\n\n class Meta:\n ordering = ('created_at',)\n verbose_name = _('Üyenin Konumu')\n verbose_name_plural = _('Üye Konumları')\n default_permissions = ()\n permissions = ((_('liste'), _('Listeleme Yetkisi')),\n (_('sil'), _('Silme Yetkisi')),\n (_('ekle'), _('Ekleme Yetkisi')),\n (_('guncelle'), _('Güncelleme Yetkisi')))\n\n\n#\n\nclass TeamMember(OneTextField):\n first_name = models.CharField(max_length=200, blank=False, verbose_name=_('Adı'))\n last_name = models.CharField(max_length=200, blank=False, verbose_name=_('Soyadı'))\n level = models.ManyToManyField(MemberLevel, max_length=5, related_name='members', blank=False,\n verbose_name=_('Ekipteki Konumu'))\n biography = models.TextField(blank=False)\n twitter = models.URLField(null=True, blank=True, verbose_name=_('Twitter'))\n instagram = models.URLField(null=True, blank=True, verbose_name=_('Instagram'))\n linkedin = models.URLField(null=True, blank=True, verbose_name=_('Linkedin'))\n facebook = models.URLField(null=True, blank=True, verbose_name=_('Facebook'))\n email = models.URLField(null=True, blank=True, verbose_name=_('Email'))\n profile_photo = models.ImageField(upload_to='teams/member_photo', blank=False)\n slug = models.SlugField(blank=True, )\n\n def __str__(self):\n return f'{self.first_name} {self.last_name}'\n\n def get_absolute_url(self):\n return reverse('content:member_detail', kwargs={'slug': self.slug}) # new\n\n def photo(self):\n if self.profile_photo and hasattr(self.profile_photo, 'url'):\n return self.profile_photo.url\n\n def level_list(self):\n levels = self.level\n\n list = '/ '.join(level.get_level_display() for level in levels.all())\n\n return list\n\n class Meta:\n ordering = ('created_at',)\n verbose_name = _('Ekip Üyesi')\n verbose_name_plural = _('Ekip Üyeleri')\n default_permissions = ()\n permissions = ((_('liste'), _('Listeleme Yetkisi')),\n (_('sil'), _('Silme Yetkisi')),\n (_('ekle'), _('Ekleme Yetkisi')),\n (_('guncelle'), _('Güncelleme Yetkisi')))\n\n\nclass Activity(OneTextField):\n creator = models.CharField(max_length=300, blank=False, verbose_name=_('Oluşturan'))\n image = models.ImageField(verbose_name=_('Afiş'))\n summary = models.CharField(max_length=400, blank=True, verbose_name=_('Özet'))\n content = RichTextField(verbose_name=_('Etkinlik İçeriği'))\n date = models.DateTimeField(verbose_name=_('Etkinlik Tarihi'))\n slug = 
models.SlugField(unique=True)\n is_published = models.BooleanField(default=True, verbose_name=_('Yayınlansın mı?'))\n\n def get_absolute_url(self):\n return reverse('content:activity_detail', kwargs={'slug': self.slug}) # new\n\n def get_image_url(self):\n if self.image and hasattr(self.image, 'url'):\n return self.image.url\n\n class Meta:\n ordering = ('created_at',)\n verbose_name = _('Aktivite')\n verbose_name_plural = _('Aktiviteler')\n default_permissions = ()\n permissions = ((_('liste'), _('Listeleme Yetkisi')),\n (_('sil'), _('Silme Yetkisi')),\n (_('ekle'), _('Ekleme Yetkisi')),\n (_('guncelle'), _('Güncelleme Yetkisi')))\n\n\nclass Album(OneTextField):\n date = models.DateTimeField(verbose_name=_('Albüm Tarihi'))\n about = RichTextField(verbose_name=_('Albüm Hakkında'), null=True, blank=True)\n image = models.ImageField(upload_to='albums', verbose_name=_('Albüm Kapağı'), null=True, blank=True)\n slug = models.SlugField(unique=True)\n is_published = models.BooleanField(default=True, verbose_name=_('Yayınlansın mı?'))\n\n def get_absolute_url(self):\n return reverse('content:album_detail', kwargs={'slug': self.slug})\n\n def get_image_url(self):\n if self.image and hasattr(self.image, 'url'):\n return self.image.url\n\n def get_images(self):\n images = AlbumImage.objects.filter(album=self)\n return images\n\n class Meta:\n ordering = ('date',)\n verbose_name = _('Albüm')\n verbose_name_plural = _('Albümler')\n default_permissions = ()\n permissions = ((_('liste'), _('Listeleme Yetkisi')),\n (_('sil'), _('Silme Yetkisi')),\n (_('ekle'), _('Ekleme Yetkisi')),\n (_('guncelle'), _('Güncelleme Yetkisi')))\n\n\ndef image_upload_to(instance, filename):\n now = datetime.now()\n path = \"images/{year}/{month}/{day}/{model}/{filename}\".format(\n year=now.year,\n month=now.month,\n day=now.day,\n model=instance.album.text,\n filename=filename\n )\n return path\n\n\nclass AlbumImage(OneTextField):\n text = models.CharField(max_length=100, null=True, blank=True, verbose_name=_('Başlık'))\n album = models.ForeignKey(Album, related_name='images', verbose_name=_('Albüm'), on_delete=models.CASCADE)\n image = models.ImageField(upload_to=image_upload_to)\n\n def __str__(self):\n return self.image.name\n\n def get_image_url(self):\n if self.image and hasattr(self.image, 'url'):\n return self.image.url\n\n class Meta:\n ordering = ('created_at',)\n verbose_name = _('Resim')\n verbose_name_plural = _('Resimler')\n default_permissions = ()\n permissions = ((_('liste'), _('Listeleme Yetkisi')),\n (_('sil'), _('Silme Yetkisi')),\n (_('ekle'), _('Ekleme Yetkisi')),\n (_('guncelle'), _('Güncelleme Yetkisi')))\n\n\nclass CustomPage(OneTextField):\n menu = models.OneToOneField('main.SubMenu', on_delete=models.PROTECT, verbose_name=_('Menü'))\n content = RichTextField(null=False, blank=False)\n slug = models.SlugField(unique=True)\n album = models.OneToOneField(Album, null=True, blank=True, on_delete=models.PROTECT)\n\n class Meta:\n ordering = ('created_at',)\n verbose_name = _('Özel Sayfa')\n verbose_name_plural = _('Özel Sayfalar')\n default_permissions = ()\n permissions = ((_('liste'), _('Listeleme Yetkisi')),\n (_('sil'), _('Silme Yetkisi')),\n (_('ekle'), _('Ekleme Yetkisi')),\n (_('guncelle'), _('Güncelleme Yetkisi')))\n\n\nclass KVKK(OneTextField):\n text = RichTextField(verbose_name=_('KVKK Metni'))\n\n def __str__(self):\n return self.text[:20]\n\n class Meta:\n ordering = ('created_at',)\n verbose_name = _('KVKK')\n verbose_name_plural = _('KVKK')\n default_permissions = ()\n permissions = 
((_('liste'), _('Listeleme Yetkisi')),\n                       (_('sil'), _('Silme Yetkisi')),\n                       (_('ekle'), _('Ekleme Yetkisi')),\n                       (_('guncelle'), _('Güncelleme Yetkisi')))\n" }, { "alpha_fraction": 0.6796280741691589, "alphanum_fraction": 0.6796280741691589, "avg_line_length": 27.16666603088379, "blob_id": "7146a7e3b94a8737d11e334839d004ac4c3560ab", "content_id": "0404acdb343c9f4ce73c3d60d033d19c792549e6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1193, "license_type": "no_license", "max_line_length": 102, "num_lines": 42, "path": "/apps/main/admin.py", "repo_name": "mucahitkosgen/main", "src_encoding": "UTF-8", "text": "from django.contrib import admin\nfrom django.core.exceptions import ValidationError\nfrom django.forms import ModelForm\nfrom django.utils.translation import gettext_lazy as _\n\nfrom .models import *\n\n\nclass SubMenuAdmin(admin.ModelAdmin):\n    list_display = ('text',)\n    readonly_fields = ('slug',)\n\n\nclass CustomSubMenuAdmin(admin.ModelAdmin):\n    list_display = ('text',)\n    prepopulated_fields = {'slug': ('text',), }\n    # inlines = [CustomPageTabularInline]\n\n\nclass MenuForm(ModelForm):\n\n    def clean(self):\n        cleaned_data = super().clean()\n        link_value = cleaned_data.get('view_name')\n        redirect_link_value = cleaned_data.get('redirect_link')\n        if link_value and redirect_link_value:\n            # self.add_error('redirect_link', _('Url ve Yönlendirme Linki aynı anda kullanılamaz.'), )\n\n            raise ValidationError(\n                _('Görünüm adı ve Yönlendirme Linki aynı anda kullanılamaz.'),\n                code='invalid',\n            )\n\n\nclass MenuAdmin(admin.ModelAdmin):\n    form = MenuForm\n\n\nadmin.site.register(Menu, MenuAdmin)\nadmin.site.register(SubMenu, SubMenuAdmin)\nadmin.site.register(SiteInfo)\nadmin.site.register(MenuLocation)\n" }, { "alpha_fraction": 0.6316614151000977, "alphanum_fraction": 0.6379310488700867, "avg_line_length": 45.57142857142857, "blob_id": "885e3bd4e7643a99ad45aabdf15b6f9b052224c0", "content_id": "6c3e06553a3d1b18639453f1b052224c007e5379", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 638, "license_type": "no_license", "max_line_length": 118, "num_lines": 14, "path": "/apps/content/urls.py", "repo_name": "mucahitkosgen/main", "src_encoding": "UTF-8", "text": "from django.conf.urls.i18n import i18n_patterns\nfrom django.urls import path, include\n\nfrom apps.content.views import (AnnouncementDetailView, AnnouncementListView,\n                                NewsCastListView, NewsCastDetailView, OurTeamListView, OurTeamDetailView, SSSListView,\n                                ActivityListView, ActivityDetailView, AlbumListView, AlbumDetailView, user_agreement,\n                                privacy_policy, SliderDetailView, communication, kvkk,\n                                )\n\napp_name = \"content\"\nurlpatterns = [\n\n    #path('', include(('deneyap.urls', 'deneyap'), namespace='deneyap')),\n]\n" }, { "alpha_fraction": 0.6101089119911194, "alphanum_fraction": 0.6127092242240906, "avg_line_length": 45.6136360168457, "blob_id": "a583ad6a7802d647669b2578cf44fac8675fda85", "content_id": "c32ba378c0ba15e9d2769d142760eb08045024f9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6183, "license_type": "no_license", "max_line_length": 113, "num_lines": 132, "path": "/apps/main/models.py", "repo_name": "mucahitkosgen/main", "src_encoding": "UTF-8", "text": "from ckeditor.fields import RichTextField\nfrom django.core.validators import MinValueValidator\nfrom django.db import models\nfrom django.urls import reverse\n\nfrom django.utils.translation import 
gettext_lazy as _\n\nfrom apps.common.fileUpload.userPath import userDirectoryPath\nfrom apps.common.oneTextField import OneTextField\nfrom .urlpatterns import urls\n\n\nclass SiteInfo(OneTextField):\n    keywords = models.TextField(null=True, verbose_name=_('Etiketler'))\n    author = models.CharField(max_length=400, null=True, blank=True, verbose_name=_('Sahip'))\n    favicon = models.ImageField(upload_to=userDirectoryPath, null=True, blank=True,\n                                verbose_name=_('Favicon'))\n    header_logo = models.ImageField(upload_to=userDirectoryPath, null=True, blank=True,\n                                    verbose_name=_('Üst Logo'))\n    footer_logo = models.ImageField(upload_to=userDirectoryPath, null=True, blank=True,\n                                    verbose_name=_('Alt Logo'))\n    address = RichTextField(null=True, verbose_name=_('Adres'))\n\n    # phone_list = RichTextField(null=True, verbose_name=_('Telefonlar'))\n    # footer_address = models.TextField(null=True, verbose_name=_('Footer Adres'))\n    # copyright = models.TextField(null=True, verbose_name=_('Telif\"'))\n    # phone = models.CharField(max_length=400, null=True, blank=True, verbose_name=_('Telefon'))\n    # facebook = models.URLField(null=True, blank=True, verbose_name=_('Facebook'))\n    # twitter = models.URLField(null=True, blank=True, verbose_name=_('Twitter'))\n    # instagram = models.URLField(null=True, blank=True, verbose_name=_('Instagram'))\n    # telegram = models.URLField(null=True, blank=True, verbose_name=_('Telegram'))\n    # bip = models.URLField(null=True, blank=True, verbose_name=_('Bip'))\n    # whatsapp = models.URLField(null=True, blank=True, verbose_name=_('Whatsapp'))\n    # youtube = models.URLField(null=True, blank=True, verbose_name=_('Youtube'))\n    # linkedin = models.URLField(null=True, blank=True, verbose_name=_('Linkedin'))\n    # email = models.EmailField(null=True, blank=True, verbose_name=_('Email'))\n    # site_url = models.URLField(null=True, blank=True, verbose_name=_('Site Url'))\n    # hours_of_service = RichTextField(null=True, blank=True, verbose_name=_('Hizmet Saati'))\n    # user_agreement = RichTextField(null=True, blank=True, verbose_name=_('Kullanıcı Sözleşmesi'))\n    # privacy_policy = RichTextField(null=True, blank=True, verbose_name=_('Gizlilik Politikası'))\n\n    @property\n    def favicon_url(self):\n        if self.favicon and hasattr(self.favicon, 'url'):\n            return self.favicon.url\n\n    @property\n    def header_logo_url(self):\n        if self.header_logo and hasattr(self.header_logo, 'url'):\n            return self.header_logo.url\n\n    @property\n    def footer_logo_url(self):\n        if self.footer_logo and hasattr(self.footer_logo, 'url'):\n            return self.footer_logo.url\n\n    class Meta:\n        ordering = ('text',)\n        verbose_name = _('Site Bilgileri')\n        verbose_name_plural = _('Site Bilgileri')\n        default_permissions = ()\n        permissions = ((_('liste'), _('Listeleme Yetkisi')),\n                       (_('sil'), _('Silme Yetkisi')),\n                       (_('ekle'), _('Ekleme Yetkisi')),\n                       (_('guncelle'), _('Güncelleme Yetkisi')))\n\n\nclass MenuLocation(OneTextField):\n    class Meta:\n        verbose_name = _('Menü Konumu')\n        verbose_name_plural = _('Menü Konumu')\n        default_permissions = ()\n        permissions = ((_('liste'), _('Listeleme Yetkisi')),\n                       (_('sil'), _('Silme Yetkisi')),\n                       (_('ekle'), _('Ekleme Yetkisi')),\n                       (_('guncelle'), _('Güncelleme Yetkisi')))\n\n\n# Menus\nclass Menu(OneTextField):\n    menu_location = models.ForeignKey(MenuLocation, null=True, blank=True, on_delete=models.PROTECT,\n                                      related_name='menus',\n                                      verbose_name=_(\"Menü Konumu\"))\n\n    
view_name = models.CharField(max_length=200, unique=True, choices=urls,\n                                 blank=True, null=True, verbose_name=_(\"Görünüm Adı\"))\n    alignment = models.IntegerField(null=True, blank=True, unique=True, verbose_name=_('Sıralama'))\n    redirect_link = models.URLField(null=True, blank=True, verbose_name=_('Yönlendirme Linki'))\n\n    def menu_list(self):\n        sub_menu_list = SubMenu.objects.filter(topMenu=self)\n        return sub_menu_list\n\n    class Meta:\n        verbose_name = _('Menü')\n        verbose_name_plural = _('Menü')\n        default_permissions = ()\n        permissions = ((_('liste'), _('Listeleme Yetkisi')),\n                       (_('sil'), _('Silme Yetkisi')),\n                       (_('ekle'), _('Ekleme Yetkisi')),\n                       (_('guncelle'), _('Güncelleme Yetkisi')))\n\n\n# models that use this model must be configured in the admin panel\nclass SubMenu(OneTextField):\n    topMenu = models.ForeignKey(Menu, on_delete=models.PROTECT, verbose_name=_(\"Menü\"), related_name='sub_menus')\n    view_name = models.CharField(max_length=200, null=True, blank=True, choices=urls,\n                                 verbose_name=_(\"Görünüm Adı\"), )\n    alignment = models.IntegerField(null=True, validators=[MinValueValidator(0)], blank=True,\n                                    verbose_name=_('Sıralama'))\n    slug = models.SlugField(max_length=200, unique=True, null=True, blank=True, verbose_name=_(\"Slug\"), )\n\n    def get_absolute_url(self):\n        return reverse(self.view_name, kwargs={'slug': self.slug})\n\n    def __str__(self):\n        return self.text\n\n    class Meta:\n        verbose_name = _('Alt Menü')\n        verbose_name_plural = _('Alt Menü')\n        default_permissions = ()\n        permissions = ((_('liste'), _('Listeleme Yetkisi')),\n                       (_('sil'), _('Silme Yetkisi')),\n                       (_('ekle'), _('Ekleme Yetkisi')),\n                       (_('guncelle'), _('Güncelleme Yetkisi')))\n" }, { "alpha_fraction": 0.5745782256126404, "alphanum_fraction": 0.5857691764831543, "avg_line_length": 60.721649169921875, "blob_id": "9a6811606fd8922ce30bccf80d38d7e8571c74cf", "content_id": "bb68c20ca4941ce8bd39a09f7222272b7e26eb66", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6032, "license_type": "no_license", "max_line_length": 164, "num_lines": 97, "path": "/apps/parameter/migrations/0001_initial.py", "repo_name": "mucahitkosgen/main", "src_encoding": "UTF-8", "text": "# Generated by Django 3.0.8 on 2021-07-15 17:08\n\nimport apps.common.fileUpload.userPath\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n    initial = True\n\n    dependencies = [\n        ('content', '0001_initial'),\n    ]\n\n    operations = [\n        migrations.CreateModel(\n            name='TimeCounter',\n            fields=[\n                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n                ('created_at', models.DateTimeField(auto_now_add=True)),\n                ('updated_at', models.DateTimeField(auto_now=True)),\n                ('created_by', models.CharField(blank=True, editable=False, max_length=100, null=True)),\n                ('updated_by', models.CharField(blank=True, editable=False, max_length=255, null=True)),\n                ('text', models.CharField(max_length=200, null=True, verbose_name='Başlık')),\n                ('summary', models.CharField(blank=True, max_length=200, verbose_name='özet')),\n                ('time_counter', models.DateTimeField()),\n                ('bg_image', models.ImageField(blank=True, null=True, upload_to=apps.common.fileUpload.userPath.userDirectoryPath, verbose_name='Arkaplan Görsel')),\n                ('icon', models.ImageField(blank=True, null=True, upload_to=apps.common.fileUpload.userPath.userDirectoryPath, verbose_name='İcon Görsel')),\n            ],\n            options={\n                'verbose_name': 'Zaman Sayaç',\n                'verbose_name_plural': 'Zaman Sayaç',\n                'permissions': 
(('liste', 'Listeleme Yetkisi'), ('sil', 'Silme Yetkisi'), ('ekle', 'Ekleme Yetkisi'), ('guncelle', 'Güncelleme Yetkisi')),\n 'default_permissions': (),\n },\n ),\n migrations.CreateModel(\n name='ValueCounterBox',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('created_at', models.DateTimeField(auto_now_add=True)),\n ('updated_at', models.DateTimeField(auto_now=True)),\n ('created_by', models.CharField(blank=True, editable=False, max_length=100, null=True)),\n ('updated_by', models.CharField(blank=True, editable=False, max_length=255, null=True)),\n ('text', models.CharField(max_length=200, null=True, verbose_name='Başlık')),\n ('counter', models.CharField(blank=True, max_length=200, verbose_name='Sayaç Değeri')),\n ('icon', models.ImageField(blank=True, null=True, upload_to=apps.common.fileUpload.userPath.userDirectoryPath, verbose_name='İcon Görsel')),\n ],\n options={\n 'verbose_name': 'Değer Sayaç',\n 'verbose_name_plural': 'Değer Sayaç',\n 'permissions': (('liste', 'Listeleme Yetkisi'), ('sil', 'Silme Yetkisi'), ('ekle', 'Ekleme Yetkisi'), ('guncelle', 'Güncelleme Yetkisi')),\n 'default_permissions': (),\n },\n ),\n migrations.CreateModel(\n name='ValueCounter',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('created_at', models.DateTimeField(auto_now_add=True)),\n ('updated_at', models.DateTimeField(auto_now=True)),\n ('created_by', models.CharField(blank=True, editable=False, max_length=100, null=True)),\n ('updated_by', models.CharField(blank=True, editable=False, max_length=255, null=True)),\n ('text', models.CharField(max_length=200, null=True, verbose_name='Başlık')),\n ('summary', models.CharField(blank=True, max_length=200, verbose_name='Özet')),\n ('bg_image', models.ImageField(blank=True, null=True, upload_to=apps.common.fileUpload.userPath.userDirectoryPath, verbose_name='Arkaplan Görsel')),\n ('icon', models.ImageField(blank=True, null=True, upload_to=apps.common.fileUpload.userPath.userDirectoryPath, verbose_name='İcon Görsel')),\n ('counter_box', models.ManyToManyField(to='parameter.ValueCounterBox', verbose_name='Sayaç Değerleri')),\n ],\n options={\n 'verbose_name': 'Değer Sayaç Section',\n 'verbose_name_plural': 'Değer Sayaç Section',\n 'permissions': (('liste', 'Listeleme Yetkisi'), ('sil', 'Silme Yetkisi'), ('ekle', 'Ekleme Yetkisi'), ('guncelle', 'Güncelleme Yetkisi')),\n 'default_permissions': (),\n },\n ),\n migrations.CreateModel(\n name='AnnouncementSection',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('created_at', models.DateTimeField(auto_now_add=True)),\n ('updated_at', models.DateTimeField(auto_now=True)),\n ('created_by', models.CharField(blank=True, editable=False, max_length=100, null=True)),\n ('updated_by', models.CharField(blank=True, editable=False, max_length=255, null=True)),\n ('text', models.CharField(max_length=200, null=True, verbose_name='Başlık')),\n ('summary', models.CharField(blank=True, max_length=200, verbose_name='Özet')),\n ('icon', models.ImageField(blank=True, null=True, upload_to=apps.common.fileUpload.userPath.userDirectoryPath, verbose_name='İcon Görsel')),\n ('Announcements', models.ManyToManyField(to='content.Announcement', verbose_name='Duyurular')),\n ],\n options={\n 'verbose_name': 'Duyuru Bölümü',\n 'verbose_name_plural': 'Duyuru Bölümü',\n 'permissions': (('liste', 'Listeleme Yetkisi'), ('sil', 'Silme Yetkisi'), 
('ekle', 'Ekleme Yetkisi'), ('guncelle', 'Güncelleme Yetkisi')),\n 'default_permissions': (),\n },\n ),\n ]\n" }, { "alpha_fraction": 0.6506778001785278, "alphanum_fraction": 0.6600625514984131, "avg_line_length": 24.236841201782227, "blob_id": "6da14047f68921ca74fa2cd6b0522a36c84ce86d", "content_id": "e851f536ca5cc4b52e7d557dd33fc1c7a5634da9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 959, "license_type": "no_license", "max_line_length": 67, "num_lines": 38, "path": "/apps/common/fileUpload/userPath.py", "repo_name": "mucahitkosgen/main", "src_encoding": "UTF-8", "text": "import os\nfrom datetime import datetime\n\nfrom django.utils.crypto import get_random_string\n\n\ndef userDirectoryPath(instance, filename):\n name = str(filename)\n ext = os.path.splitext(name)[1] # [0] returns path+filename\n\n newName = get_random_string(length=32) + ext\n\n # file will be uploaded to MEDIA_ROOT/user_<id>/<filename>\n\n return 'upload/userFormUpload/{0}'.format(newName)\n\n\ndef upload_to(instance, filename):\n name = str(filename)\n ext = os.path.splitext(name)[1] # [0] returns path+filename\n\n new_name = get_random_string(length=32) + ext\n\n # file will be uploaded to MEDIA_ROOT/user_<id>/<filename>\n\n return 'upload/'.format(new_name)\n\n\ndef image_upload_to(instance, filename):\n now = datetime.now()\n path = \"images/{year}/{month}/{day}/{model}/{filename}\".format(\n year=now.year,\n month=now.month,\n day=now.day,\n model=instance.text,\n filename=filename\n )\n return path\n" }, { "alpha_fraction": 0.792792797088623, "alphanum_fraction": 0.8198198080062866, "avg_line_length": 53.5, "blob_id": "e3f7b7d4f7071a8f46ae68e0a84010436173fa85", "content_id": "feb733f8bf492eba725377b26d58f37d0ff11e3d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 111, "license_type": "no_license", "max_line_length": 64, "num_lines": 2, "path": "/apps/main/views.py", "repo_name": "mucahitkosgen/main", "src_encoding": "UTF-8", "text": "from django.shortcuts import render, redirect, get_object_or_404\nfrom django.views.generic import DetailView\n\n\n" }, { "alpha_fraction": 0.695067286491394, "alphanum_fraction": 0.695067286491394, "avg_line_length": 29.409090042114258, "blob_id": "75e38df5ca9f24c0d82bf7e260ccf719e3e262dd", "content_id": "9328516649017e8f0368e1e38e25704715b7b3b4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2007, "license_type": "no_license", "max_line_length": 119, "num_lines": 66, "path": "/apps/content/admin.py", "repo_name": "mucahitkosgen/main", "src_encoding": "UTF-8", "text": "from django.contrib import admin\n\n# Register your models here.\nfrom apps.content.models import (Announcement, Slider, WebSites, SSS, AnnouncementCategory, NewsCast, NewsCastCategory,\n Activity, Album, AlbumImage, CustomPage, KVKK, )\n\n\nclass AlbumImageInline(admin.TabularInline):\n model = AlbumImage\n prepopulated_fields = {'text': ('image',)}\n\n\nclass AlbumAdmin(admin.ModelAdmin):\n list_display = ('text',)\n prepopulated_fields = {'slug': ('text',), }\n inlines = [AlbumImageInline]\n\n\nclass AnnouncementAdmin(admin.ModelAdmin):\n list_display = ('text', 'category',)\n prepopulated_fields = {'slug': ('text',), }\n\n\nclass NewsCastAdmin(admin.ModelAdmin):\n prepopulated_fields = {'slug': ('text',), }\n\n\n# Register your models here.\nclass TeamMemberAdmin(admin.ModelAdmin):\n list_display = ('first_name',)\n 
prepopulated_fields = {'slug': ('first_name', 'last_name',), }\n\n\nclass ActivityAdmin(admin.ModelAdmin):\n list_display = ('text',)\n prepopulated_fields = {'slug': ('text',), }\n\n\nclass CustomPageAdmin(admin.ModelAdmin):\n list_display = ('text',)\n prepopulated_fields = {\n 'slug': ('text',),\n }\n\n def save_model(self, request, obj, form, change):\n obj.menu.view_name = 'content:deneyap:custom_page'\n obj.menu.slug = obj.slug\n obj.menu.save()\n super(CustomPageAdmin, self).save_model(request, obj, form, change)\n\nclass SliderAdmin(admin.ModelAdmin):\n list_display = ('text',)\n prepopulated_fields = {'slug': ('text',), }\n\nadmin.site.register(Announcement, AnnouncementAdmin)\nadmin.site.register(NewsCast, NewsCastAdmin)\nadmin.site.register(AnnouncementCategory)\nadmin.site.register(NewsCastCategory)\nadmin.site.register(Slider,SliderAdmin)\nadmin.site.register(WebSites)\nadmin.site.register(SSS)\nadmin.site.register(CustomPage, CustomPageAdmin)\n# admin.site.register(AlbumImage)\nadmin.site.register(Album, AlbumAdmin)\nadmin.site.register(Activity, ActivityAdmin)\nadmin.site.register(KVKK)\n" }, { "alpha_fraction": 0.5592124462127686, "alphanum_fraction": 0.5678812861442566, "avg_line_length": 62.213623046875, "blob_id": "fb8eb4d2ce877d81bf9ca047754cfcf439a62cf0", "content_id": "1798aa9271ba8d14eaaa1f90fac60f28cc1296f9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 20511, "license_type": "no_license", "max_line_length": 192, "num_lines": 323, "path": "/apps/content/migrations/0001_initial.py", "repo_name": "mucahitkosgen/main", "src_encoding": "UTF-8", "text": "# Generated by Django 3.0.8 on 2021-07-15 17:08\n\nimport apps.common.fileUpload.userPath\nimport apps.content.models\nimport ckeditor.fields\nimport colorfield.fields\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n ('sites', '0002_alter_domain_unique'),\n ('main', '0001_initial'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Activity',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('created_at', models.DateTimeField(auto_now_add=True)),\n ('updated_at', models.DateTimeField(auto_now=True)),\n ('created_by', models.CharField(blank=True, editable=False, max_length=100, null=True)),\n ('updated_by', models.CharField(blank=True, editable=False, max_length=255, null=True)),\n ('text', models.CharField(max_length=200, null=True, verbose_name='Başlık')),\n ('creator', models.CharField(max_length=300, verbose_name='Oluşturan')),\n ('image', models.ImageField(upload_to='', verbose_name='Afiş')),\n ('summary', models.CharField(blank=True, max_length=400, verbose_name='Özet')),\n ('content', ckeditor.fields.RichTextField(verbose_name='Etkinlik İçeriği')),\n ('date', models.DateTimeField(verbose_name='Etkinlik Tarihi')),\n ('slug', models.SlugField(unique=True)),\n ('is_published', models.BooleanField(default=True, verbose_name='Yayınlansın mı?')),\n ],\n options={\n 'verbose_name': 'Aktivite',\n 'verbose_name_plural': 'Aktiviteler',\n 'ordering': ('created_at',),\n 'permissions': (('liste', 'Listeleme Yetkisi'), ('sil', 'Silme Yetkisi'), ('ekle', 'Ekleme Yetkisi'), ('guncelle', 'Güncelleme Yetkisi')),\n 'default_permissions': (),\n },\n ),\n migrations.CreateModel(\n name='Album',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, 
verbose_name='ID')),\n ('created_at', models.DateTimeField(auto_now_add=True)),\n ('updated_at', models.DateTimeField(auto_now=True)),\n ('created_by', models.CharField(blank=True, editable=False, max_length=100, null=True)),\n ('updated_by', models.CharField(blank=True, editable=False, max_length=255, null=True)),\n ('text', models.CharField(max_length=200, null=True, verbose_name='Başlık')),\n ('date', models.DateTimeField(verbose_name='Albüm Tarihi')),\n ('about', ckeditor.fields.RichTextField(blank=True, null=True, verbose_name='Albüm Hakkında')),\n ('image', models.ImageField(blank=True, null=True, upload_to='albums', verbose_name='Albüm Kapağı')),\n ('slug', models.SlugField(unique=True)),\n ('is_published', models.BooleanField(default=True, verbose_name='Yayınlansın mı?')),\n ],\n options={\n 'verbose_name': 'Albüm',\n 'verbose_name_plural': 'Albümler',\n 'ordering': ('date',),\n 'permissions': (('liste', 'Listeleme Yetkisi'), ('sil', 'Silme Yetkisi'), ('ekle', 'Ekleme Yetkisi'), ('guncelle', 'Güncelleme Yetkisi')),\n 'default_permissions': (),\n },\n ),\n migrations.CreateModel(\n name='AnnouncementCategory',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('created_at', models.DateTimeField(auto_now_add=True)),\n ('updated_at', models.DateTimeField(auto_now=True)),\n ('created_by', models.CharField(blank=True, editable=False, max_length=100, null=True)),\n ('updated_by', models.CharField(blank=True, editable=False, max_length=255, null=True)),\n ('text', models.CharField(max_length=200, null=True, verbose_name='Başlık')),\n ('color', colorfield.fields.ColorField(default='#FFFFFF', max_length=200, verbose_name='Renk Kodu')),\n ],\n options={\n 'verbose_name': 'Duyuru Kategorisi',\n 'verbose_name_plural': 'Duyuru Kategorileri',\n },\n ),\n migrations.CreateModel(\n name='KVKK',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('created_at', models.DateTimeField(auto_now_add=True)),\n ('updated_at', models.DateTimeField(auto_now=True)),\n ('created_by', models.CharField(blank=True, editable=False, max_length=100, null=True)),\n ('updated_by', models.CharField(blank=True, editable=False, max_length=255, null=True)),\n ('text', ckeditor.fields.RichTextField(verbose_name='KVKK Metni')),\n ],\n options={\n 'verbose_name': 'KVKK',\n 'verbose_name_plural': 'KVKK',\n 'ordering': ('created_at',),\n 'permissions': (('liste', 'Listeleme Yetkisi'), ('sil', 'Silme Yetkisi'), ('ekle', 'Ekleme Yetkisi'), ('guncelle', 'Güncelleme Yetkisi')),\n 'default_permissions': (),\n },\n ),\n migrations.CreateModel(\n name='MemberLevel',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('created_at', models.DateTimeField(auto_now_add=True)),\n ('updated_at', models.DateTimeField(auto_now=True)),\n ('created_by', models.CharField(blank=True, editable=False, max_length=100, null=True)),\n ('updated_by', models.CharField(blank=True, editable=False, max_length=255, null=True)),\n ('text', models.CharField(max_length=200, null=True, verbose_name='Başlık')),\n ('level', models.CharField(choices=[('BD', 'Board of Directors'), ('BT', 'Board of Trustees')], max_length=200, verbose_name='Ekipteki Konumu')),\n ],\n options={\n 'verbose_name': 'Üyenin Konumu',\n 'verbose_name_plural': 'Üye Konumları',\n 'ordering': ('created_at',),\n 'permissions': (('liste', 'Listeleme Yetkisi'), ('sil', 'Silme Yetkisi'), ('ekle', 'Ekleme 
Yetkisi'), ('guncelle', 'Güncelleme Yetkisi')),\n 'default_permissions': (),\n },\n ),\n migrations.CreateModel(\n name='NewsCastCategory',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('created_at', models.DateTimeField(auto_now_add=True)),\n ('updated_at', models.DateTimeField(auto_now=True)),\n ('created_by', models.CharField(blank=True, editable=False, max_length=100, null=True)),\n ('updated_by', models.CharField(blank=True, editable=False, max_length=255, null=True)),\n ('text', models.CharField(max_length=200, null=True, verbose_name='Başlık')),\n ('color', colorfield.fields.ColorField(default='#ffffff', max_length=200, verbose_name='Renk Kodu')),\n ],\n options={\n 'verbose_name': 'Haber Kategorisi',\n 'verbose_name_plural': 'Haber Kategorileri',\n },\n ),\n migrations.CreateModel(\n name='Slider',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('created_at', models.DateTimeField(auto_now_add=True)),\n ('updated_at', models.DateTimeField(auto_now=True)),\n ('created_by', models.CharField(blank=True, editable=False, max_length=100, null=True)),\n ('updated_by', models.CharField(blank=True, editable=False, max_length=255, null=True)),\n ('text', models.CharField(max_length=200, null=True, verbose_name='Başlık')),\n ('bg_image', models.ImageField(blank=True, null=True, upload_to='slider/images', verbose_name='Görsel')),\n ('slider_summary', models.CharField(blank=True, max_length=400, null=True, verbose_name='Slider Özet')),\n ('slider_content', ckeditor.fields.RichTextField(blank=True, null=True, verbose_name='Slider Özet')),\n ('redirect_link', models.URLField(blank=True, null=True, verbose_name='Yönlendirme Linki')),\n ('is_published', models.BooleanField(default=True, verbose_name='Yayınlansın mı?')),\n ('alignment', models.IntegerField(blank=True, null=True, unique=True, verbose_name='Sıralama')),\n ('slug', models.SlugField(unique=True)),\n ('date', models.DateTimeField()),\n ],\n options={\n 'verbose_name': 'Slider',\n 'verbose_name_plural': 'Slider',\n 'permissions': (('liste', 'Listeleme Yetkisi'), ('sil', 'Silme Yetkisi'), ('ekle', 'Ekleme Yetkisi'), ('guncelle', 'Güncelleme Yetkisi')),\n 'default_permissions': (),\n },\n ),\n migrations.CreateModel(\n name='WebSites',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('created_at', models.DateTimeField(auto_now_add=True)),\n ('updated_at', models.DateTimeField(auto_now=True)),\n ('created_by', models.CharField(blank=True, editable=False, max_length=100, null=True)),\n ('updated_by', models.CharField(blank=True, editable=False, max_length=255, null=True)),\n ('text', models.CharField(max_length=200, null=True, verbose_name='Başlık')),\n ('logo', models.ImageField(blank=True, null=True, upload_to=apps.common.fileUpload.userPath.userDirectoryPath, verbose_name='Logo')),\n ('site_url', models.URLField(max_length=300, verbose_name='Site Url')),\n ('detail_info', ckeditor.fields.RichTextField(blank=True, null=True)),\n ('is_published', models.BooleanField(default=True, verbose_name='Yayına alınsın mı?')),\n ],\n options={\n 'verbose_name': 'Web Site',\n 'verbose_name_plural': 'Web Siteler',\n },\n ),\n migrations.CreateModel(\n name='TeamMember',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('created_at', models.DateTimeField(auto_now_add=True)),\n ('updated_at', 
models.DateTimeField(auto_now=True)),\n ('created_by', models.CharField(blank=True, editable=False, max_length=100, null=True)),\n ('updated_by', models.CharField(blank=True, editable=False, max_length=255, null=True)),\n ('text', models.CharField(max_length=200, null=True, verbose_name='Başlık')),\n ('first_name', models.CharField(max_length=200, verbose_name='Adı')),\n ('last_name', models.CharField(max_length=200, verbose_name='Soyadı')),\n ('biography', models.TextField()),\n ('twitter', models.URLField(blank=True, null=True, verbose_name='Twitter')),\n ('instagram', models.URLField(blank=True, null=True, verbose_name='Instagram')),\n ('linkedin', models.URLField(blank=True, null=True, verbose_name='Linkedin')),\n ('facebook', models.URLField(blank=True, null=True, verbose_name='Facebook')),\n ('email', models.URLField(blank=True, null=True, verbose_name='Email')),\n ('profile_photo', models.ImageField(upload_to='teams/member_photo')),\n ('slug', models.SlugField(blank=True)),\n ('level', models.ManyToManyField(max_length=5, related_name='members', to='content.MemberLevel', verbose_name='Ekipteki Konumu')),\n ],\n options={\n 'verbose_name': 'Ekip Üyesi',\n 'verbose_name_plural': 'Ekip Üyeleri',\n 'ordering': ('created_at',),\n 'permissions': (('liste', 'Listeleme Yetkisi'), ('sil', 'Silme Yetkisi'), ('ekle', 'Ekleme Yetkisi'), ('guncelle', 'Güncelleme Yetkisi')),\n 'default_permissions': (),\n },\n ),\n migrations.CreateModel(\n name='SSS',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('created_at', models.DateTimeField(auto_now_add=True)),\n ('updated_at', models.DateTimeField(auto_now=True)),\n ('created_by', models.CharField(blank=True, editable=False, max_length=100, null=True)),\n ('updated_by', models.CharField(blank=True, editable=False, max_length=255, null=True)),\n ('text', models.TextField(verbose_name='Soru')),\n ('answer', models.TextField(verbose_name='Cevap')),\n ('site', models.ManyToManyField(to='sites.Site')),\n ],\n options={\n 'verbose_name': 'Sıkça Sorular Soru',\n 'verbose_name_plural': 'Sıkça Sorular Sorular',\n },\n ),\n migrations.CreateModel(\n name='NewsCast',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('created_at', models.DateTimeField(auto_now_add=True)),\n ('updated_at', models.DateTimeField(auto_now=True)),\n ('created_by', models.CharField(blank=True, editable=False, max_length=100, null=True)),\n ('updated_by', models.CharField(blank=True, editable=False, max_length=255, null=True)),\n ('text', models.CharField(max_length=200, null=True, verbose_name='Başlık')),\n ('keywords', models.TextField(blank=True, null=True, verbose_name='Etiketler')),\n ('summary', models.CharField(blank=True, max_length=400, verbose_name='Özet')),\n ('bg_image', models.ImageField(blank=True, null=True, upload_to=apps.common.fileUpload.userPath.userDirectoryPath, verbose_name='Arkaplan Görsel')),\n ('icon', models.ImageField(blank=True, null=True, upload_to=apps.common.fileUpload.userPath.userDirectoryPath, verbose_name='İcon Görsel')),\n ('content', ckeditor.fields.RichTextField(blank=True, verbose_name='İçerik')),\n ('is_published', models.BooleanField(default=True)),\n ('date', models.DateTimeField()),\n ('slug', models.SlugField(unique=True)),\n ('category', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='news_casts', to='content.NewsCastCategory', verbose_name='Haber Kategorisi')),\n ],\n options={\n 
'verbose_name': 'Haber',\n 'verbose_name_plural': 'Haberler',\n 'permissions': (('liste', 'Listeleme Yetkisi'), ('sil', 'Silme Yetkisi'), ('ekle', 'Ekleme Yetkisi'), ('guncelle', 'Güncelleme Yetkisi')),\n 'default_permissions': (),\n },\n ),\n migrations.CreateModel(\n name='CustomPage',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('created_at', models.DateTimeField(auto_now_add=True)),\n ('updated_at', models.DateTimeField(auto_now=True)),\n ('created_by', models.CharField(blank=True, editable=False, max_length=100, null=True)),\n ('updated_by', models.CharField(blank=True, editable=False, max_length=255, null=True)),\n ('text', models.CharField(max_length=200, null=True, verbose_name='Başlık')),\n ('content', ckeditor.fields.RichTextField()),\n ('slug', models.SlugField(unique=True)),\n ('album', models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, to='content.Album')),\n ('menu', models.OneToOneField(on_delete=django.db.models.deletion.PROTECT, to='main.SubMenu', verbose_name='Menü')),\n ],\n options={\n 'verbose_name': 'Özel Sayfa',\n 'verbose_name_plural': 'Özel Sayfalar',\n 'ordering': ('created_at',),\n 'permissions': (('liste', 'Listeleme Yetkisi'), ('sil', 'Silme Yetkisi'), ('ekle', 'Ekleme Yetkisi'), ('guncelle', 'Güncelleme Yetkisi')),\n 'default_permissions': (),\n },\n ),\n migrations.CreateModel(\n name='Announcement',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('created_at', models.DateTimeField(auto_now_add=True)),\n ('updated_at', models.DateTimeField(auto_now=True)),\n ('created_by', models.CharField(blank=True, editable=False, max_length=100, null=True)),\n ('updated_by', models.CharField(blank=True, editable=False, max_length=255, null=True)),\n ('text', models.CharField(max_length=200, null=True, verbose_name='Başlık')),\n ('keywords', models.TextField(blank=True, null=True, verbose_name='Etiketler')),\n ('summary', models.CharField(blank=True, max_length=400, verbose_name='Özet')),\n ('bg_image', models.ImageField(blank=True, null=True, upload_to=apps.common.fileUpload.userPath.userDirectoryPath, verbose_name='Arkaplan Görsel')),\n ('icon', models.ImageField(blank=True, null=True, upload_to=apps.common.fileUpload.userPath.userDirectoryPath, verbose_name='İcon Görsel')),\n ('content', ckeditor.fields.RichTextField(blank=True, verbose_name='İçerik')),\n ('is_published', models.BooleanField(default=True)),\n ('date', models.DateTimeField()),\n ('slug', models.SlugField(unique=True)),\n ('category', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='announcements', to='content.AnnouncementCategory', verbose_name='Duyuru Kategorisi')),\n ],\n options={\n 'verbose_name': 'Duyuru',\n 'verbose_name_plural': 'Duyuru',\n 'ordering': ('date',),\n 'permissions': (('liste', 'Listeleme Yetkisi'), ('sil', 'Silme Yetkisi'), ('ekle', 'Ekleme Yetkisi'), ('guncelle', 'Güncelleme Yetkisi')),\n 'default_permissions': (),\n },\n ),\n migrations.CreateModel(\n name='AlbumImage',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('created_at', models.DateTimeField(auto_now_add=True)),\n ('updated_at', models.DateTimeField(auto_now=True)),\n ('created_by', models.CharField(blank=True, editable=False, max_length=100, null=True)),\n ('updated_by', models.CharField(blank=True, editable=False, max_length=255, null=True)),\n ('text', 
models.CharField(blank=True, max_length=100, null=True, verbose_name='Başlık')),\n ('image', models.ImageField(upload_to=apps.content.models.image_upload_to)),\n ('album', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='images', to='content.Album', verbose_name='Albüm')),\n ],\n options={\n 'verbose_name': 'Resim',\n 'verbose_name_plural': 'Resimler',\n 'ordering': ('created_at',),\n 'permissions': (('liste', 'Listeleme Yetkisi'), ('sil', 'Silme Yetkisi'), ('ekle', 'Ekleme Yetkisi'), ('guncelle', 'Güncelleme Yetkisi')),\n 'default_permissions': (),\n },\n ),\n ]\n" }, { "alpha_fraction": 0.6525974273681641, "alphanum_fraction": 0.6688311696052551, "avg_line_length": 27, "blob_id": "e92db06c822efd99603bd44cf53187d3887e1eb5", "content_id": "c5cd9bfa9dda2b59005c65f23d692384b7c29d6c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 308, "license_type": "no_license", "max_line_length": 75, "num_lines": 11, "path": "/T3/urls.py", "repo_name": "mucahitkosgen/main", "src_encoding": "UTF-8", "text": "from django.conf.urls.i18n import i18n_patterns\nfrom django.urls import path\n\nfrom .views import *\n\napp_name = 'T3'\nurlpatterns = [\n # path('', main, name='main-page'),\n # path('corporate/<slug:slug>/', corporate_page, name='corporate_page'),\n # path('<slug:slug>/', custom_page, name='custom_page')\n]\n" }, { "alpha_fraction": 0.6995097994804382, "alphanum_fraction": 0.7022058963775635, "avg_line_length": 27.93617057800293, "blob_id": "66d9a6cde6f88373aa8c1a1626ec623a294b6d76", "content_id": "3af58ea81872b944f906f3159faad6ee84f96d03", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4080, "license_type": "no_license", "max_line_length": 92, "num_lines": 141, "path": "/apps/content/views.py", "repo_name": "mucahitkosgen/main", "src_encoding": "UTF-8", "text": "from django.contrib.sites.shortcuts import get_current_site\nfrom django.core.paginator import Paginator\nfrom django.http import Http404\nfrom django.shortcuts import render, get_object_or_404\nfrom django.views.generic import ListView, DetailView\n\n# Create your views here.\nfrom core import settings\nfrom .models import Announcement, SSS, NewsCast, TeamMember, Activity, Slider, Album, KVKK\n\n\nclass AnnouncementListView(ListView):\n model = Announcement\n paginate_by = 6\n template_name = 'content/announcement.html'\n context_object_name = 'announcements'\n\n def get_queryset(self):\n announcement = Announcement.objects.filter(is_published=True).order_by('created_at')\n return announcement\n\n def get_context_data(self, *, object_list=None, **kwargs):\n context = super(AnnouncementListView, self).get_context_data(**kwargs)\n context['similar_calls'] = Announcement.objects.filter(is_published=True)[:5]\n return context\n\n\nclass AnnouncementDetailView(DetailView):\n model = Announcement\n template_name = 'content/announcement_detail.html'\n\n def get_context_data(self, **kwargs):\n context = super(AnnouncementDetailView, self).get_context_data(**kwargs)\n context['similar_calls'] = Announcement.objects.filter(is_published=True)[:5]\n return context\n\n\nclass SSSListView(ListView):\n model = SSS\n template_name = 'content/sss.html'\n context_object_name = 'ssss'\n queryset = SSS.objects.all().order_by('created_at')\n\n\n\n\n\n\nclass OurTeamListView(ListView):\n model = TeamMember\n template_name = 'content/member.html'\n context_object_name = 'all_member'\n\n def get_queryset(self):\n 
all_member = TeamMember.objects.all()\n return all_member\n\n def get_context_data(self, *, object_list=None, **kwargs):\n context = super(OurTeamListView, self).get_context_data(**kwargs)\n context['bd'] = TeamMember.objects.filter(level__level='BD')\n context['bt'] = TeamMember.objects.filter(level__level='BT')\n return context\n\n\nclass OurTeamDetailView(DetailView):\n template_name = 'content/member_detail.html'\n model = TeamMember\n context_object_name = 'member'\n\n\nclass NewsCastListView(ListView):\n model = NewsCast\n template_name = 'content/news_casts.html'\n context_object_name = 'news_casts'\n paginate_by = 6\n\n def get_queryset(self):\n news_casts = NewsCast.objects.filter(is_published=True).order_by('created_at')\n return news_casts\n\n\nclass NewsCastDetailView(DetailView):\n model = NewsCast\n template_name = 'content/news_cast_detail.html'\n context_object_name = 'news_cast'\n\n def get_context_data(self, **kwargs):\n context = super(NewsCastDetailView, self).get_context_data(**kwargs)\n context['similar_news_casts'] = NewsCast.objects.filter(is_published=True)[:5]\n return context\n\n\nclass ActivityListView(ListView):\n model = Activity\n template_name = 'content/activities.html'\n\n def get_queryset(self):\n queryset = Activity.objects.filter(is_published=True).order_by('-created_at')\n return queryset\n\n\nclass ActivityDetailView(DetailView):\n model = Activity\n template_name = 'content/activity_detail.html'\n\n\nclass AlbumListView(ListView):\n model = Album\n template_name = 'content/albums.html'\n\n def get_queryset(self):\n queryset = Album.objects.filter(is_published=True).order_by('-created_at')\n return queryset\n\n\nclass AlbumDetailView(DetailView):\n model = Album\n template_name = 'content/album_detail.html'\n\n\nclass SliderDetailView(DetailView):\n model = Slider\n template_name = 'content/slider_detail.html'\n context_object_name = 'slider'\n\n\ndef privacy_policy(request):\n return render(request, 'content/privacy_policy.html')\n\n\ndef user_agreement(request):\n return render(request, 'content/user_agreement.html')\n\n\ndef communication(request):\n return render(request, 'content/communication.html')\n\n\ndef kvkk(request):\n kvkk = KVKK.objects.last()\n return render(request, 'content/kvkk.html', {'kvkk': kvkk})\n" }, { "alpha_fraction": 0.8174387216567993, "alphanum_fraction": 0.8174387216567993, "avg_line_length": 29.5, "blob_id": "548d3b9433a501fd60824025b0031d62c5a1d997", "content_id": "9a2715295600fe5e2b2092b3acfa935aa838db28", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 367, "license_type": "no_license", "max_line_length": 81, "num_lines": 12, "path": "/apps/parameter/admin.py", "repo_name": "mucahitkosgen/main", "src_encoding": "UTF-8", "text": "from django.contrib import admin\n\nfrom translations.admin import TranslatableAdmin, TranslationInline\n\nfrom apps.parameter.models import TimeCounter, ValueCounter, ValueCounterBox, \\\n AnnouncementSection\n#\n# admin.site.register(TimeCounter)\n# admin.site.register(ValueCounter)\n# admin.site.register(ValueCounterBox)\n#\n# admin.site.register(AnnouncementSection)\n\n" } ]
14
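The `Menu`/`SubMenu` pair in `apps/main/models.py` above forms a two-level tree: `MenuLocation` reaches its menus through the `menus` related name, each `Menu` reaches its children through `sub_menus`, and `SubMenu.get_absolute_url` resolves the stored `view_name` together with the slug. A minimal sketch of a context processor that could hand that tree to every template; the module path, the function name, and the `menu_locations` context key are illustrative assumptions, not part of the repository:

# apps/main/context_processors.py (hypothetical module, not in the repo)
from apps.main.models import MenuLocation

def menus(request):
    # prefetch_related follows the related names 'menus' and 'sub_menus'
    # defined on Menu and SubMenu, so the whole tree loads in three queries.
    locations = MenuLocation.objects.prefetch_related('menus__sub_menus')
    return {'menu_locations': locations}

Registered under TEMPLATES['OPTIONS']['context_processors'] in settings, this would let the base template iterate menu locations without per-view plumbing.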
LytaNicoll/Utils
https://github.com/LytaNicoll/Utils
6972cedc7ee5582207acb64d0604712f4eb0f9c8
3555a331135f7e407c382df1b0acb8396e0bebca
903a5b19e9a6e62a22b982a855caa41a26704c31
refs/heads/master
2020-09-01T22:38:41.740551
2019-11-01T23:46:20
2019-11-01T23:46:20
219076345
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.78125, "alphanum_fraction": 0.78125, "avg_line_length": 31, "blob_id": "14b9f86ae140da617e732cd90bdf47d34d37afb0", "content_id": "f013431e9c64cd98c4666b3e7c7c6efa301f5db4", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 64, "license_type": "permissive", "max_line_length": 55, "num_lines": 2, "path": "/README.md", "repo_name": "LytaNicoll/Utils", "src_encoding": "UTF-8", "text": "# Utils\na set of useful functions that are good for general use\n" }, { "alpha_fraction": 0.6106557250022888, "alphanum_fraction": 0.631147563457489, "avg_line_length": 19.33333396911621, "blob_id": "1109894133ece699e34df6a9959a295f25036289", "content_id": "190d87dd11ee53ce99d2301e6a33e41ac67edf70", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 244, "license_type": "permissive", "max_line_length": 46, "num_lines": 12, "path": "/setup.py", "repo_name": "LytaNicoll/Utils", "src_encoding": "UTF-8", "text": "import distutils\nfrom distutils.core import setup\n\nsetup(\n name='Utils',\n version='0.0.0',\n packages=[''],\n url='https://github.com/LytaNicoll/Utils',\n license='Apache Licence 2.0',\n author='Lyta Nicoll',\n description='Mine',\n)\n" } ]
2
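The `setup.py` in the `LytaNicoll/Utils` entry builds on `distutils`, which PEP 632 deprecated in Python 3.10 and which was removed from the standard library in 3.12. A minimal `setuptools` equivalent that keeps the original field values; the `find_packages()` call stands in for the hand-written `packages` list and is an assumption about the intended layout:

from setuptools import find_packages, setup

setup(
    name='Utils',
    version='0.0.0',
    packages=find_packages(),  # discovers packages instead of hard-coding ['']
    url='https://github.com/LytaNicoll/Utils',
    license='Apache Licence 2.0',
    author='Lyta Nicoll',
    description='Mine',
)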
limahseng/python_oop
https://github.com/limahseng/python_oop
083e469476ec2e1b1051537ffdeacca82a03c5b7
fa1468237af07f327626e155eee8d7bb393fc414
c64b9f31a5aed24dca0e94afef2e322efafb483f
refs/heads/master
2020-06-02T07:48:34.682406
2013-04-29T03:41:58
2013-04-29T03:41:58
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6101454496383667, "alphanum_fraction": 0.6214969754219055, "avg_line_length": 29.31182861328125, "blob_id": "72a0b80a64c55138fe0acf4f6b9a2c8fecb9a505", "content_id": "a54e0772c6f4f9f23e62f9d8820bed0b2e16ff94", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2819, "license_type": "no_license", "max_line_length": 104, "num_lines": 93, "path": "/bankaccount.py", "repo_name": "limahseng/python_oop", "src_encoding": "UTF-8", "text": "# bankaccount.py\n\nclass Account:\n \"\"\" bank account super class \"\"\"\n\n def __init__(self, account_no, balance):\n \"\"\" constructor method \"\"\"\n self.__account_no = account_no\n self.__balance = balance\n\n def get_account_no(self):\n \"\"\" accessor method to retrieve account number \"\"\"\n return self.__account_no\n\n def get_balance(self):\n \"\"\" accessor method to retrieve balance \"\"\"\n return self.__balance\n\n## def set_balance(self, new_balance):\n## \"\"\" modifier/mutator method to update balance \"\"\"\n## self.__balance = new_balance\n\n def deposit(self, amount):\n \"\"\" modifier/mutator method to increase balance \"\"\"\n self.__balance += amount\n\n def withdraw(self, amount):\n \"\"\" modifier/mutator method to decrease balance \"\"\"\n self.__balance -= amount\n\n def display(self):\n \"\"\" helper/support method to show account info \"\"\"\n print(\"Account No:\", self.__account_no)\n print(\"Balance:\", self.__balance)\n\n\nclass SavingsAccount(Account):\n \"\"\" savings account subclass \"\"\"\n\n def __init__(self, account_no, balance, interest):\n \"\"\" subclass constructor method \"\"\"\n super().__init__(account_no, balance)\n self.__interest = interest\n\n def calc_interest(self):\n \"\"\" helper/support method to compute interest \"\"\"\n self.deposit(self.get_balance() * (1 + self.__interest))\n # self.__balance # illegal\n\n def display(self):\n \"\"\" helper/support method to show savings account info \"\"\"\n super().display()\n print(\"Savings interest:\", self.__interest)\n\n\nclass CurrentAccount(Account):\n \"\"\" current account subclass \"\"\"\n\n def __init__(self, account_no, balance, overdraft):\n \"\"\" subclass constructor method \"\"\"\n super().__init__(account_no, balance)\n self.__overdraft = overdraft\n\n def withdraw(self, amount): # overrides superclass withdraw\n \"\"\" helper/support method to withdraw up to overdraft limit \"\"\"\n if amount > (self.get_balance() + self.__overdraft): # cannot withdraw more than overdraft limit\n print(\"Withdrawal amount exceeds overdraft limit.\")\n else:\n super().withdraw(amount)\n \n\n def display(self):\n \"\"\" helper/support method to show current account info \"\"\"\n super().display()\n print(\"Overdraft limit:\", self.__overdraft)\n \n\n# main\nsavings_acct1 = SavingsAccount(\"C01\", 0, 0.01)\nsavings_acct1.deposit(500)\nsavings_acct1.calc_interest()\n# savings_acct1.display()\n\ncurrent_acct1 = CurrentAccount(\"C01\", 0, 500)\ncurrent_acct1.withdraw(300)\ncurrent_acct1.withdraw(300)\n# current_acct1.display()\n\naccounts = []\naccounts.append(savings_acct1)\naccounts.append(current_acct1)\nfor account in accounts:\n account.display()\n" }, { "alpha_fraction": 0.5695760846138, "alphanum_fraction": 0.576059877872467, "avg_line_length": 25.355262756347656, "blob_id": "80055c465cd2c9c5c4f411e0e81eda8b82f7dde1", "content_id": "1e2cb6338488e24eb90738f509dab42afab8d894", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 
2005, "license_type": "no_license", "max_line_length": 53, "num_lines": 76, "path": "/person.py", "repo_name": "limahseng/python_oop", "src_encoding": "UTF-8", "text": "# person.py\n\nclass Person:\n \"\"\" person super class \"\"\"\n def __init__(self, pid, name):\n \"\"\" constructor for person \"\"\"\n self.__pid = pid\n self.__name = name\n\n def get_pid(self):\n \"\"\" accessor for person id \"\"\"\n return self.__pid\n\n def get_name(self):\n \"\"\" accessor for name \"\"\"\n return self.__name\n\n def display(self):\n \"\"\" support/helper to show person info \"\"\"\n print(\"ID:\", self.__pid)\n print(\"Name:\", self.__name)\n\n\nclass Student(Person):\n \"\"\" student subclass \"\"\"\n def __init__(self, pid, name, classid):\n \"\"\" constructor for student \"\"\"\n super().__init__(pid, name)\n self.__classid = classid\n\n def get_classid(self):\n \"\"\" accessor for student class \"\"\"\n return self.__classid\n\n def set_classid(self, new_classid):\n \"\"\" modifier/mutator for student class \"\"\"\n self.__classid = new_classid\n\n def display(self): # overriding\n \"\"\" support/helper to show student info \"\"\"\n super().display()\n print(\"Class:\", self.__classid)\n\n\nclass Staff(Person):\n \"\"\" staff subclass \"\"\"\n \n def __init__(self, pid, name, department):\n \"\"\" constructor for staff \"\"\"\n super().__init__(pid, name)\n self.__department = department\n\n def get_department(self):\n \"\"\" accessor for staff department \"\"\"\n return self.__department\n\n def set_department(self, new_department):\n \"\"\" modifier/mutator for staff department \"\"\"\n self.__department = new_department\n\n def display(self): # overriding\n \"\"\" support/helper to show staff info \"\"\"\n super().display()\n print(\"Department:\", self.__department)\n\n\n# main\n# instantiate student and staff objects\nstudent1 = Student(\"S01\", \"Lim Ah Seng\", \"13Y5C23\")\nstaff1 = Staff(\"A05\", \"Robert Yeo\", \"Computing\")\n\nperson_list = []\nperson_list.append(student1)\nperson_list.append(staff1)\nfor person in person_list:\n person.display() # polymorphism\n\n\n" } ]
2
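Both files in the `limahseng/python_oop` entry demonstrate the same pattern: name-mangled private attributes behind accessor/mutator methods, with `display` and `withdraw` overridden in subclasses and dispatched polymorphically from a mixed list. A short sanity check of the `CurrentAccount` overdraft rule as written above (note that importing `bankaccount` also runs its module-level demo, since that code is not guarded by `if __name__ == '__main__':`):

from bankaccount import CurrentAccount

acct = CurrentAccount('C02', 100, 500)  # balance 100, overdraft limit 500
acct.withdraw(600)                      # allowed: 600 <= 100 + 500
assert acct.get_balance() == -500
acct.withdraw(1)                        # rejected: would exceed the overdraft limit
assert acct.get_balance() == -500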